"""Tests for the transformers import mechanism and generic utilities."""
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
# Sha-256 of pytorch_model.bin on the top of `main`, for checking purposes


# Dummy context managers used by the ContextManagers tests below
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
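# A minimal sketch of what a helper like `ContextManagers` does; the real
# implementation lives in `transformers.utils`, so treat this as an
# illustrative assumption rather than the shipped code. Managers are entered
# in list order on an ExitStack and unwound in reverse, which is exactly the
# Bonjour!/Welcome!/.../Bye!/Au revoir! nesting asserted above.
from contextlib import ExitStack


class ContextManagersSketch:
    """Enter a list of context managers as one; exit them in reverse order."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)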
"""Dummy objects that raise a helpful error when the `note_seq` backend is missing."""
from ..utils import DummyObject, requires_backends


# Class name reconstructed from diffusers' `note_seq` dummy-object pattern.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
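# Usage sketch (illustrative, the exact message wording is an assumption):
# with `note_seq` absent, instantiating the dummy raises an ImportError that
# names the backend to install, instead of an opaque ModuleNotFoundError from
# a deeper import site.
# >>> MidiProcessor()
# Traceback (most recent call last):
#   ...
# ImportError: MidiProcessor requires the note_seq library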
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
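# Illustrative note: with the lazy structure above, importing the package
# stays cheap; the heavy submodule import happens on first attribute access.
# >>> from transformers.models import convnext
# >>> convnext.ConvNextConfig  # only now is configuration_convnext imported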
"""Benchmark iterating over a datasets.Dataset with several access patterns."""
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
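# `get_duration` comes from the local benchmark `utils` module. A minimal
# stand-in with the behavior the code above relies on (returning the elapsed
# wall-clock time of the wrapped call) might look like this -- an assumption
# for illustration, not the shipped helper:
#
# import functools, time
#
# def get_duration(func):
#     @functools.wraps(func)
#     def wrapper(*args, **kwargs):
#         start = time.time()
#         func(*args, **kwargs)
#         return time.time() - start
#     return wrapper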
"""simple docstring"""
import itertools
import math
def lowercase ( A_ )-> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(A_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def lowercase ( )-> Tuple:
'''simple docstring'''
a : Tuple = 2
while True:
if is_prime(A_ ):
yield num
num += 1
def lowercase ( A_ = 10_001 )-> int:
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , A_ ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
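# Sanity checks: the 6th prime is 13, and solution() with the default
# nth=10001 returns 104743, the accepted Project Euler 7 answer.
# >>> solution(6)
# 13
# >>> solution(10001)
# 104743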
"""Tokenization classes for MBART, backed by SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
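# Hypothetical usage sketch (needs `sentencepiece` plus the pretrained files;
# identifiers follow the constants above):
# >>> tok = MBartTokenizer.from_pretrained(
# ...     "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# >>> tok("UN Chief Says There Is No Military Solution in Syria").input_ids
# A source sequence carries no prefix and the suffix [eos, src_lang_code],
# per set_src_lang_special_tokens above.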
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
lowerCAmelCase__ = (720, 1_280) # Height, Width
lowerCAmelCase__ = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowerCAmelCase__ = 1 / 100
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = 250
def a__ ( ):
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : List[str] = get_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for index in range(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[str] = random.sample(range(len(SCREAMING_SNAKE_CASE ) ) , 4 )
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = update_image_and_anno(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , filter_scale=SCREAMING_SNAKE_CASE , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase : List[Any] = random_chars(3_2 )
lowerCAmelCase : Dict = path.split(os.sep )[-1].rsplit("." , 1 )[0]
lowerCAmelCase : Union[str, Any] = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , SCREAMING_SNAKE_CASE , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
lowerCAmelCase : List[Any] = []
for anno in new_annos:
lowerCAmelCase : str = anno[3] - anno[1]
lowerCAmelCase : List[Any] = anno[4] - anno[2]
lowerCAmelCase : Optional[int] = anno[1] + width / 2
lowerCAmelCase : List[str] = anno[2] + height / 2
lowerCAmelCase : Optional[int] = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(SCREAMING_SNAKE_CASE )
with open(f"""{file_root}.txt""" , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = []
lowerCAmelCase : Union[str, Any] = []
for label_file in glob.glob(os.path.join(SCREAMING_SNAKE_CASE , "*.txt" ) ):
lowerCAmelCase : str = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(SCREAMING_SNAKE_CASE ) as in_file:
lowerCAmelCase : List[str] = in_file.readlines()
lowerCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE , f"""{label_name}.jpg""" )
lowerCAmelCase : Optional[int] = []
for obj_list in obj_lists:
lowerCAmelCase : Optional[Any] = obj_list.rstrip("\n" ).split(" " )
lowerCAmelCase : Union[str, Any] = float(obj[1] ) - float(obj[3] ) / 2
lowerCAmelCase : Tuple = float(obj[2] ) - float(obj[4] ) / 2
lowerCAmelCase : List[str] = float(obj[1] ) + float(obj[3] ) / 2
lowerCAmelCase : Optional[Any] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
return img_paths, labels
def a__ ( SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : list , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : tuple[int, int] , SCREAMING_SNAKE_CASE : tuple[float, float] , SCREAMING_SNAKE_CASE : float = 0.0 , ):
'''simple docstring'''
lowerCAmelCase : List[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowerCAmelCase : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase : Union[str, Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase : Dict = int(scale_x * output_size[1] )
lowerCAmelCase : List[Any] = int(scale_y * output_size[0] )
lowerCAmelCase : Optional[int] = []
lowerCAmelCase : List[Any] = []
for i, index in enumerate(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[int] = all_img_list[index]
path_list.append(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = all_annos[index]
lowerCAmelCase : Union[str, Any] = cva.imread(SCREAMING_SNAKE_CASE )
if i == 0: # top-left
lowerCAmelCase : Any = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, divid_point_y) )
lowerCAmelCase : Optional[Any] = img
for bbox in img_annos:
lowerCAmelCase : Union[str, Any] = bbox[1] * scale_x
lowerCAmelCase : Dict = bbox[2] * scale_y
lowerCAmelCase : Optional[int] = bbox[3] * scale_x
lowerCAmelCase : List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowerCAmelCase : Optional[int] = cva.resize(SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, divid_point_y) )
lowerCAmelCase : Optional[int] = img
for bbox in img_annos:
lowerCAmelCase : Dict = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase : Dict = bbox[2] * scale_y
lowerCAmelCase : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase : Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowerCAmelCase : Union[str, Any] = cva.resize(SCREAMING_SNAKE_CASE , (divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase : List[Any] = img
for bbox in img_annos:
lowerCAmelCase : str = bbox[1] * scale_x
lowerCAmelCase : Tuple = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase : Optional[Any] = bbox[3] * scale_x
lowerCAmelCase : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowerCAmelCase : List[Any] = cva.resize(
SCREAMING_SNAKE_CASE , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase : Optional[Any] = img
for bbox in img_annos:
lowerCAmelCase : Union[str, Any] = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase : str = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase : int = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase : str = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowerCAmelCase : Union[str, Any] = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
lowerCAmelCase : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
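# The label files are assumed to be YOLO-format: one object per line,
# "<class> <x_center> <y_center> <width> <height>", normalized to [0, 1].
# Worked example of the corner-box conversion done in get_dataset:
# "0 0.5 0.5 0.2 0.4" -> class 0, xmin=0.4, ymin=0.3, xmax=0.6, ymax=0.7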
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(f"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
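# Example invocation (illustrative script and file names, not part of the
# repository):
# python convert_controlnet_to_diffusers.py \
#     --checkpoint_path control_sd15_canny.pth \
#     --original_config_file cldm_v15.yaml \
#     --dump_path ./controlnet-canny --device cpu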
"""
Project Euler problem 3: https://projecteuler.net/problem=3
The prime factors of 13195 are 5, 7, 13 and 29. What is the largest prime
factor of the number 600851475143?
"""


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = AudioLDMPipeline
__lowerCamelCase = TEXT_TO_AUDIO_PARAMS
__lowerCamelCase = TEXT_TO_AUDIO_BATCH_PARAMS
__lowerCamelCase = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_snake_case , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_lowerCAmelCase = ClapTextModelWithProjection(_snake_case )
_lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
_lowerCAmelCase = SpeechT5HifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_snake_case , )
_lowerCAmelCase = SpeechT5HifiGan(_snake_case )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
embeds.append(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """egg cracking"""
_lowerCAmelCase = audioldm_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = ["""hey"""]
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
assert audio_shape == (1, 256)
_lowerCAmelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_lowerCAmelCase = SpeechT5HifiGan(_snake_case ).to(_snake_case )
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) )
_lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = 25
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[77230:77240]
_lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[27780:27790]
_lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Tests for the fast BLOOM tokenizer."""
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
"""Shannon entropy of a text, for single characters and for character pairs."""
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first-order and second-order entropies of `text` (in bits),
    followed by the difference between the two."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0

    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent character pairs in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
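    # Worked example: in "abcd" every character occurs exactly once, so the
    # single-character entropy is -4 * (1/4) * log2(1/4) = 2.0 bits; the first
    # value printed by calculate_prob("abcd") is therefore 2.0.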
if __name__ == "__main__":
    main()
"""simple docstring"""
import random
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = [], [], []
for element in data:
if element < pivot:
less.append(A_ )
elif element > pivot:
greater.append(A_ )
else:
equal.append(A_ )
return less, equal, greater
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(A_ ) or index < 0:
return None
lowerCAmelCase__ : str = items[random.randint(0 , len(A_ ) - 1 )]
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = _partition(A_ , A_ )
lowerCAmelCase__ : str = len(A_ )
lowerCAmelCase__ : Optional[Any] = len(A_ )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(A_ , A_ )
# must be in larger
else:
return quick_select(A_ , index - (m + count) )
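# Example: the median of a 7-element list is the element at sorted index 3;
# quick_select finds it in expected linear time without fully sorting.
# >>> quick_select([2, 4, 5, 7, 899, 54, 32], 3)
# 7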
"""
Project Euler problem 44: https://projecteuler.net/problem=44
Find the pair of pentagonal numbers P_j and P_k for which their sum and
difference are pentagonal and the difference D = |P_k - P_j| is minimised.
"""


def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Return the smallest difference of a pair of pentagonal numbers
    (searched up to `limit`) whose sum and difference are both pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
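# Why is_pentagonal works: P(n) = n(3n - 1)/2 solves to
# n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal iff that n is an integer.
# For example x = 22 gives (1 + sqrt(529)) / 6 = 4, and indeed P(4) = 22.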
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
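

# A minimal usage sketch (assuming the classes above mirror the upstream
# WhisperConfig / WhisperOnnxConfig): attribute_map routes the generic
# hidden_size attribute to Whisper's d_model.
#
#   config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
#   assert config.hidden_size == 384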
| 165 | 0 |
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 202 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
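
    # Example invocation (the output path is illustrative):
    #   python convert_dpt_to_pytorch.py \
    #       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
    #       --pytorch_dump_folder_path ./dpt-large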
| 202 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature extractor class from its plain string name."""
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    """Load the feature extractor configuration dictionary from a local path or the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )
    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )
    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
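

# Minimal usage sketch (the checkpoint id is illustrative; any Hub repo that
# ships a preprocessor_config.json should resolve through the logic above):
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")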
| 351 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    """A toy tool used by the interpreter tests below."""
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_slicing(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
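
    def test_evaluate_combined(self):
        # One more end-to-end sketch composed only of constructs exercised
        # above (assignment, tool calls, and lists).
        code = "y = add_two(x)\ntest_list = [x, y]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "y": 5, "test_list": [3, 5]})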
| 323 | 0 |
'''simple docstring'''
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort the slice input_list[low:high+1] by merging its two sorted halves in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
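

# Example: iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) should return [1, 2, 5, 7, 7, 8, 9].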
if __name__ == "__main__":
_UpperCamelCase : str = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
_UpperCamelCase : List[str] = []
else:
_UpperCamelCase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
| 304 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
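

# Typical downstream usage of this subpackage (a sketch; the checkpoint id is
# the one published for Shap-E on the Hub, but treat it as an assumption):
#   from diffusers import ShapEPipeline
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e")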
| 304 | 1 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """Compute the adaptive log-softmax (negative log-likelihood when labels are given)."""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        """Compute log probabilities over all tokens."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out

| 31 |
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all files into one."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
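

# Worked example: optimal_merge_pattern([2, 3, 4]) merges 2+3 first (cost 5),
# then 5+4 (cost 9), for a total cost of 14.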
if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 31 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control(self):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]

| 81 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalize the first letter of ``sentence``."""
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
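

# Example: capitalize("hello world") -> "Hello world"; capitalize("123 hey") -> "123 hey".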
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer saved
        # in a format predating transformers v4.17.0.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower-cases letters, so this common test is not relevant here.
        pass
| 359 |

"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the largest product a*b*c for a Pythagorean triplet with a + b + c = n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'''{solution() = }''')
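    # With the default n = 1_000 this prints `solution() = 31875000`, from the single
    # triplet (200, 375, 425): 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000.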
| 85 | 0 |
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """simple docstring"""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )
return dynamic_programming(1 )
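# Example (the classic LeetCode 983 case, assuming that input): days = [1, 4, 6, 7, 8, 20]
# and costs = [2, 7, 15] give mincost_tickets(days, costs) == 11, i.e. a 7-day pass for
# days 1-7 plus 1-day passes for days 8 and 20 (7 + 2 + 2).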
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
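    # Prints 0.375: the sets share |{"c", "d", "e"}| = 3 elements over a union of 8.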
| 264 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs, ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
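# A minimal usage sketch (assuming the standard `transformers` config API):
#
#     config = CanineConfig(num_hash_buckets=8_192)
#     assert config.downsampling_rate == 4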
| 350 | import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(data_input_tuple) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(('Actual output value:', output(i, 'test')))
        print(('Hypothesis output:', calculate_hypothesis_value(i, 'test')))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
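    # run_gradient_descent() above is batch gradient descent on the linear hypothesis
    #   h(x) = p0 + p1*x1 + p2*x2 + p3*x3
    # with the update p_i <- p_i - LEARNING_RATE * (1/m) * sum_j error(j) * x_i(j),
    # iterated until consecutive parameter vectors agree within absolute_error_limit.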
| 50 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Sort items by value/weight ratio, take whole items greedily, then a fraction
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
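    # Worked example: frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0,
    # since items are taken in value/weight order (6, 5, 4) and only 20/30 of the
    # last item fits: 60 + 100 + 20 * 120 / 30 = 240.0.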
| 9 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """simple docstring"""

    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50257, n_positions: int = 1024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False, ):
        '''simple docstring'''
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal." )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, ):
        '''simple docstring'''
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        '''simple docstring'''
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        '''simple docstring'''
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        '''simple docstring'''
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(self, input_ids=None, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None, ):
        '''simple docstring'''
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
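# A minimal usage sketch (hypothetical sizes; in the UniDiffuser pipeline
# `prefix_inner_dim` is the CLIP feature width and `features` a batch of such features):
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#     tokens, lengths = decoder.generate_captions(features, eos_token_id=50256, device="cpu")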
| 149 | 0 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('''Enter the first string ''').strip()
    input_b = input('''Enter the second string ''').strip()

    status = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 47 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=99 , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> Optional[int]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]:
lowerCamelCase_ = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )
lowerCamelCase_ = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> Any:
lowerCamelCase_ = BioGptForCausalLM(config=lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Tuple:
lowerCamelCase_ = BioGptModel(config=lowercase )
model.to(lowercase )
model.eval()
# create attention mask
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
lowerCamelCase_ = self.seq_length // 2
lowerCamelCase_ = 0
# first forward pass
lowerCamelCase_ , lowerCamelCase_ = model(lowercase , attention_mask=lowercase ).to_tuple()
        # create hypothetical next token and extend to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
lowerCamelCase_ = ids_tensor((1,) , lowercase ).item() + 1
lowerCamelCase_ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
lowerCamelCase_ = random_other_next_tokens
# append to next input_ids and attn_mask
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase )] , dim=1 , )
# get two different outputs
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )["last_hidden_state"]
lowerCamelCase_ = model(lowercase , past_key_values=lowercase , attention_mask=lowercase )["last_hidden_state"]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Dict:
lowerCamelCase_ = BioGptModel(config=lowercase ).to(lowercase ).eval()
lowerCamelCase_ = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase )
# first forward pass
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
lowerCamelCase_ , lowerCamelCase_ = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
lowerCamelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
lowerCamelCase_ = model(lowercase , attention_mask=lowercase )["last_hidden_state"]
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[
"last_hidden_state"
]
# select random slice
lowerCamelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase , lowercase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase , lowercase=False ) -> Optional[int]:
lowerCamelCase_ = BioGptForCausalLM(lowercase )
model.to(lowercase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
lowerCamelCase_ = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def SCREAMING_SNAKE_CASE_( self , lowercase , *lowercase ) -> List[Any]:
lowerCamelCase_ = BioGptModel(lowercase )
lowerCamelCase_ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , *lowercase ) -> Any:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = BioGptForTokenClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
lowerCAmelCase__ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (BioGptForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = BioGptModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase , gradient_checkpointing=lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowercase )
lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase_ = "left"
# Define PAD Token = EOS Token = 50256
lowerCamelCase_ = tokenizer.eos_token
lowerCamelCase_ = model.config.eos_token_id
# use different length sentences to test batching
lowerCamelCase_ = [
"Hello, my dog is a little",
"Today, I",
]
lowerCamelCase_ = tokenizer(lowercase , return_tensors="pt" , padding=lowercase )
lowerCamelCase_ = inputs["input_ids"].to(lowercase )
lowerCamelCase_ = model.generate(
input_ids=lowercase , attention_mask=inputs["attention_mask"].to(lowercase ) , )
lowerCamelCase_ = tokenizer(sentences[0] , return_tensors="pt" ).input_ids.to(lowercase )
lowerCamelCase_ = model.generate(input_ids=lowercase )
lowerCamelCase_ = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
lowerCamelCase_ = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(lowercase )
lowerCamelCase_ = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
lowerCamelCase_ = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
lowerCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
lowerCamelCase_ = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> int:
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = BioGptModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
def SCREAMING_SNAKE_CASE_( self ) -> Tuple:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = input_dict["input_ids"]
lowerCamelCase_ = input_ids.ne(1 ).to(lowercase )
lowerCamelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ = 3
lowerCamelCase_ = "multi_label_classification"
lowerCamelCase_ = input_dict["input_ids"]
lowerCamelCase_ = input_ids.ne(1 ).to(lowercase )
lowerCamelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ = BioGptForSequenceClassification(lowercase )
model.to(lowercase )
model.eval()
lowerCamelCase_ = model(lowercase , attention_mask=lowercase , labels=lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Optional[int]:
lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
lowerCamelCase_ = torch.tensor([[2, 4805, 9, 656, 21]] )
lowerCamelCase_ = model(lowercase )[0]
lowerCamelCase_ = 42384
lowerCamelCase_ = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowercase )
lowerCamelCase_ = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase_ = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(lowercase )
torch.manual_seed(0 )
lowerCamelCase_ = tokenizer("COVID-19 is" , return_tensors="pt" ).to(lowercase )
lowerCamelCase_ = model.generate(
**lowercase , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=lowercase , )
lowerCamelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase )
lowerCamelCase_ = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(lowercase , lowercase )
| 47 | 1 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
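    # For the default nth = 10_001 this prints `solution() = 104743`.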
| 13 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
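# e.g. manhattan_distance([1, 1], [9, 9]) == 16.0, i.e. |1 - 9| + |1 - 9|.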
if __name__ == "__main__":
import doctest
doctest.testmod() | 76 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset('nielsr/rvlcdip-demo')

        image = dataset['train'][0]['image'].convert('RGB')

        inputs = image_processor(image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7], device=torch_device, dtype=torch.float, )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 365 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import OfflineModeIsEnabled, http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled() -> None:
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(OfflineModeIsEnabled):
            http_head('https://huggingface.co')
| 104 | 0 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, config, tf_checkpoint_path) -> int:
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase : List[str] = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(f"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase : List[Any] = name[1:]
# figure out how many levels deep the name is
lowercase : int = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(SCREAMING_SNAKE_CASE__ )
# read data
lowercase : List[Any] = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
names.append("""/""".join(SCREAMING_SNAKE_CASE__ ) )
arrays.append(SCREAMING_SNAKE_CASE__ )
logger.info(f"Read a total of {len(SCREAMING_SNAKE_CASE__ ):,} layers" )
# Sanity check
if len(set(SCREAMING_SNAKE_CASE__ ) ) != 1:
raise ValueError(f"Found layer names with different depths (layer depth {list(set(SCREAMING_SNAKE_CASE__ ) )})" )
lowercase : Union[str, Any] = list(set(SCREAMING_SNAKE_CASE__ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowercase : List[str] = full_name.split("""/""" )
lowercase : int = model
lowercase : List[Any] = []
for i, m_name in enumerate(SCREAMING_SNAKE_CASE__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
lowercase : Tuple = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
lowercase : Any = getattr(SCREAMING_SNAKE_CASE__ , """embeddings""" )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """encoder""" )
lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , """layer""" )
lowercase : Dict = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """pooler""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
lowercase : Tuple = getattr(SCREAMING_SNAKE_CASE__ , """token_type_embeddings""" )
else:
raise ValueError(f"Unknown embedding layer with name {full_name}" )
trace.append("""weight""" )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
lowercase : Any = getattr(SCREAMING_SNAKE_CASE__ , """attention""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
lowercase : Tuple = getattr(SCREAMING_SNAKE_CASE__ , """attention""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """attention""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
lowercase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
lowercase : str = getattr(SCREAMING_SNAKE_CASE__ , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
lowercase : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """intermediate""" )
lowercase : int = getattr(SCREAMING_SNAKE_CASE__ , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , """weight""" )
else:
logger.warning(f"Ignored {m_name}" )
# for certain layers reshape is necessary
lowercase : List[Any] = """.""".join(SCREAMING_SNAKE_CASE__ )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , SCREAMING_SNAKE_CASE__ ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase : str = array.transpose()
if pointer.shape == array.shape:
lowercase : Tuple = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
f" {array.shape}" )
logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path) -> Any:
    # Instantiate model
    logger.info(f"Loading model based on config from {bert_config_file}...")
    config = BertConfig.from_json_file(bert_config_file)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tfa_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
lowercase : str = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 20 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    '''simple docstring'''
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
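    # e.g. (per the upstream doctest) longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
    # returns [10, 22, 33, 41, 60, 80], a longest non-decreasing subsequence.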
| 318 | 0 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
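# e.g. gaussian(0.0) == 1 / sqrt(2 * pi), roughly 0.3989, the peak of the standard normal.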
if __name__ == "__main__":
import doctest
doctest.testmod() | 171 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cv2.imwrite(F'''{file_root}.jpg''', new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''')
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj)
        with open(F'''{file_root}.txt''', "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, F'''{label_name}.jpg''')

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0, ):
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅") | 171 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _a ( UpperCamelCase__ ):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
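
# A minimal usage sketch (an assumption: this config is exposed as
# ``transformers.Data2VecAudioConfig``). The conv strides multiply up to the
# number of raw audio samples per output frame, which is exactly what the
# ``inputs_to_logits_ratio`` property above computes:
#
#     from transformers import Data2VecAudioConfig
#
#     config = Data2VecAudioConfig()
#     assert config.num_feat_extract_layers == 7
#     assert config.inputs_to_logits_ratio == 5 * 2**5  # 320 samples per logit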
| 110 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a serialized Flax checkpoint into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load Flax weights into a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            # conv layer: Flax stores (H, W, in, out), PyTorch expects (out, in, H, W)
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            # linear layer: Flax kernels are transposed w.r.t. torch.nn.Linear weights
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            # layer norm scale maps to the PyTorch weight
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )

    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
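

# A minimal, self-contained sketch of the key-renaming/transposition rules used
# above, run on a hand-built Flax-style state dict (names such as ``toy_state``
# are illustrative only, not part of any library API):
if __name__ == "__main__":
    toy_state = {"conv": {"kernel": jnp.ones((3, 3, 16, 32))}, "dense": {"kernel": jnp.ones((8, 4))}}
    for key, tensor in flatten_dict(toy_state, sep=".").items():
        parts = key.split(".")
        if parts[-1] == "kernel" and tensor.ndim == 4:
            tensor = jnp.transpose(tensor, (3, 2, 0, 1))  # Flax (H, W, in, out) -> PyTorch (out, in, H, W)
        elif parts[-1] == "kernel":
            tensor = tensor.T  # Flax linear kernels are transposed w.r.t. torch.nn.Linear weights
        print(".".join(parts[:-1] + ["weight"]), tuple(tensor.shape))
    # conv.weight (32, 16, 3, 3)
    # dense.weight (4, 8)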
| 110 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
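
# A minimal usage sketch (the checkpoint name is the published UnCLIP example,
# ``kakaobrain/karlo-v1-alpha``; treat it as an assumption and substitute any
# compatible repo). Live code is deliberately kept out of this ``__init__``:
#
#     import torch
#     from diffusers import UnCLIPPipeline
#
#     pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16).to("cuda")
#     image = pipe("a photo of an astronaut riding a horse").images[0]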
| 140 |
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))


# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
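    # Quick sanity checks on illustrative inputs: any sentence missing a letter
    # of the alphabet must fail all three variants.
    assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
    assert not is_pangram("My name is Unknown")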
| 140 | 1 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 5_00_00
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
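
# To run just these tests locally (a sketch; the path assumes the standard
# transformers test layout this file comes from):
#
#     python -m pytest tests/models/roformer/test_modeling_flax_roformer.py -k "masked_lm" -v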
| 244 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 11_03)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 11_03)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(rust_ids, py_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_61_03
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_03
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 10_24
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1_50, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__A,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(rust_ids, py_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 10_00, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_string).input_ids

        self.assertListEqual(
            token_ids,
            [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1],
        )
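
# The offset scheme exercised above, in short: ids 0 and 1 are <pad> and </s>,
# the ids below ``tokenizer.offset`` are reserved mask/unk tokens, and raw
# SentencePiece ids are shifted up by the offset. A quick check (downloads
# google/pegasus-large):
#
#     from transformers import PegasusTokenizer
#
#     tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
#     assert tok.offset == 103 and tok.pad_token_id == 0 and tok.eos_token_id == 1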
| 266 | 0 |
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` into less-than, equal and greater-than lists."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
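

# Example: the median of an odd-length list is quick_select(items, len(items) // 2).
if __name__ == "__main__":
    items = [2, 4, 5, 7, 899, 54, 32]
    # sorted: [2, 4, 5, 7, 32, 54, 899] -> the middle element is 7
    print(quick_select(items, len(items) // 2))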
| 288 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
    class DummyDataset(Dataset):
        def __init__(self, length: int = 1_01):
            self.length = length

        def __len__(self):
            return self.length

        def __getitem__(self, i):
            return i
    class DummyDataCollator:
        def __call__(self, features):
            return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
    class DummyModel(nn.Module):
        def __init__(self):
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            self.fc = nn.Linear(120, 80)

        def forward(self, input_ids, labels=None):
            if labels is not None:
                return torch.tensor(0.0, device=input_ids.device), input_ids
            else:
                return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [1_01, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 288 | 1 |
'''simple docstring'''
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text used to help models such as XLNet and Transformer-XL deal with short prompts.
    XL_PREFIX = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})
        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
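

# A minimal usage sketch of this pipeline (``gpt2`` is just the usual example
# checkpoint; any causal LM checkpoint works):
#
#     from transformers import pipeline
#
#     generator = pipeline("text-generation", model="gpt2")
#     print(generator("Once upon a time", max_new_tokens=20)[0]["generated_text"])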
| 168 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
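

# Sketch of how the extra outputs returned in eval mode are typically consumed
# for entropy-based early exit (the 0.5 threshold is illustrative only):
#
#     outputs = model(input_ids)  # model.eval() mode, no labels
#     (original_entropy, highway_entropy), exit_layer = outputs[-2], outputs[-1]
#     first_confident = next((i for i, e in enumerate(highway_entropy) if e < 0.5), exit_layer)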
| 310 | 0 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password Generator = full boot with random_number, random_letters, and
    # random_character FUNCTIONS
    # Put your code here...
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowerase
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
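

# Quick illustration (only the deterministic checks; generated passwords vary):
#
#     >>> is_strong_password("Hwea7$2!")
#     True
#     >>> is_strong_password("Sh0r1!")  # shorter than the 8-character minimum
#     False
#     >>> len(password_generator(12))
#     12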
| 2 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(__A)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize

        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length

        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name

        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset

        self.output_retrieved = output_retrieved

        self.do_deduplication = do_deduplication

        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
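

# A minimal construction sketch (checkpoint names are the published RAG
# examples and should be treated as assumptions; real checkpoints such as
# ``facebook/rag-token-nq`` already ship a ready-made config):
#
#     from transformers import AutoConfig, RagConfig
#
#     question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#     generator = AutoConfig.from_pretrained("facebook/bart-large")
#     config = RagConfig.from_question_encoder_generator_configs(question_encoder, generator)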
| 2 | 1 |
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 105 |
"""simple docstring"""
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 105 | 1 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
def lowerCamelCase_ ( self: List[Any] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCamelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase__ : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase__ : Union[str, Any] = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase__ : List[str] = feature_extractor(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase__ : List[str] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
lowerCamelCase__ : str = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test batched
lowerCamelCase__ : Any = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_features
lowerCamelCase__ : str = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase__ : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCamelCase__ : Optional[Any] = np.asarray(UpperCamelCase__ )
lowerCamelCase__ : int = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_features
lowerCamelCase__ : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def lowerCamelCase_ ( self: List[str] ):
lowerCamelCase__ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCamelCase__ : int = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase__ : Dict = [None, 16, None]
for max_length, padding in zip(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase__ : List[Any] = feature_extractor(
UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = inputs.input_features
lowerCamelCase__ : List[Any] = inputs.attention_mask
lowerCamelCase__ : Dict = [np.sum(UpperCamelCase__ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then truncate to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
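    # Illustrative usage outside the test harness (hedged: the concrete extractor
    # class is not named in this snippet; a Speech2Text-style log-mel extractor
    # with feature_size=24 is assumed):
    #   features = extractor(raw_speech, sampling_rate=16000, return_tensors="np").input_features
    #   features.shape  # (batch, num_frames, 24)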
| 129 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
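# Behaviour sketch of the parser above (assert-style examples, not in the original):
#   strabool(True) is True; strabool("yes") is True; strabool("0") is False;
#   any other string raises argparse.ArgumentTypeError.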
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # target keys follow diffusers' ResnetBlock2D parameter naming
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # target keys follow diffusers' attention block parameter naming
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
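# Hedged usage sketch (not part of the original script): after conversion, the
# checkpoint can be sampled with the single-step consistency sampler. The
# pipeline call signature below is an assumption based on the diffusers API.
#   image_unet = UNet2DModel(**unet_config)
#   image_unet.load_state_dict(con_pt_to_diffuser("model.pt", unet_config))
#   pipe = ConsistencyModelPipeline(unet=image_unet, scheduler=CMStochasticIterativeScheduler(**CD_SCHEDULER_CONFIG))
#   image = pipe(num_inference_steps=1).images[0]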
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 129 | 1 |
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }
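# Note (added): the scraper depends on IMDb's markup ("titleColumn" /
# "ratingColumn imdbRating" table cells) and returns a plain mapping, e.g.
#   {"The Shawshank Redemption": 9.2, "The Godfather": 9.1, ...}
# (ratings shown are illustrative, not fetched values).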
def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
| 87 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
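# Minimal usage sketch (illustrative): with the defaults above, a video is
# described as 32 frames split into 2x16x16 tubelets.
#   config = VivitConfig()
#   assert config.num_frames == 32 and config.tubelet_size == [2, 16, 16]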
| 292 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)
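    # Worked example (added): from (0, 0) to the goal (6, 6) of the grid above,
    # the Manhattan heuristic gives |6| + |6| = 12, while the Euclidean one
    # gives sqrt(6**2 + 6**2) ~= 8.49. With 4-connected unit moves neither
    # overestimates the true cost, so A* remains optimal with either.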
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
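    # Design note (added): each iteration re-targets one search at the other's
    # current frontier node (self.fwd_astar.target = current_bwd_node above), so
    # the heuristics pull the two frontiers toward each other until they meet.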
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 351 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
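# Usage sketch (illustrative): the decorator discards the wrapped function's
# return value and reports wall-clock seconds instead.
#
#   @get_duration
#   def write(dataset):
#       dataset.map(lambda x: x)
#
#   seconds = write(my_dataset)  # float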
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
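# Hedged end-to-end example (feature spec assumed, not from the original):
#   features = datasets.Features({"image": datasets.Array2D((64, 64), dtype="float32")})
#   dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
#   assert len(dataset) == 10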
| 211 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )

    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 55 | 1 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 371 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution over a pre-softmax logit Tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
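# Why this equals the Shannon entropy of softmax(x) (derivation, added for clarity):
# with Z = sum_j exp(x_j) and p_i = exp(x_i) / Z,
#   H(p) = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(Z))
#        = log(Z) - (1/Z) * sum_i x_i * exp(x_i)
# which is exactly torch.log(A) - B / A computed above.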
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
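# Hedged usage sketch (API as defined in this file; checkpoint name illustrative):
#   model = DeeBertForSequenceClassification.from_pretrained("bert-base-uncased")
#   model.bert.encoder.set_early_exit_entropy(0.5)  # exit once a highway head is confident
#   model.bert.init_highway_pooler()
#   outputs = model(input_ids)  # may return early from an intermediate layer at eval time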
| 278 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the concrete model this processor belongs to is not recoverable from
    # this snippet, so the class name is a stand-in. The defaults below (resize
    # to 256x256, center-crop to 224x224, ImageNet mean/std) are kept as-is.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 325 |
def binomial_coefficient(n: int, r: int) -> int:
    """Compute nCr with Pascal's rule, using a single one-dimensional table."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
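
# A quick sanity check for the function above; math.comb from the standard
# library is the reference (this check is illustrative, not part of the
# original script):
import math

for n, r in [(10, 5), (20, 3), (7, 0)]:
    assert binomial_coefficient(n, r) == math.comb(n, r)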
| 325 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict."""
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 136 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
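
# As a minimal usage sketch, any object with a `process` method satisfies the
# protocol; the one-pole low-pass below is illustrative and not part of the
# original module:
class OnePoleLowPass:
    """y[n] = a * x[n] + (1 - a) * y[n - 1], a simple smoothing filter."""

    def __init__(self, a: float = 0.1) -> None:
        self.a = a
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev = self.a * sample + (1 - self.a) * self.prev
        return self.prev

# show_frequency_response(OnePoleLowPass(), 48000)  # rolls off high frequencies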
| 136 | 1 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big- to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as a little-endian hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Encode the message as a bit string padded to a multiple of 512 bits."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit value left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of the message as a hex byte string."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
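
    # The digest can be validated against the standard library (an illustrative
    # check, with hashlib as the reference implementation):
    import hashlib

    message = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")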
| 162 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """Placeholder that raises if the `note_seq` backend is not installed."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 115 | 0 |
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 92 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {"facebook/bart-base": BartForConditionalGeneration}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 92 | 1 |
'''simple docstring'''
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the given length from all printable sets."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password generated with the required characters included.
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char
    # Passwords should contain UPPERCASE, lowercase,
    # numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print("Alternative Password generated:", alternative_password_generator(chars_incl, length))
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
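
    # For example (illustrative only), the strength check can be exercised
    # directly. Mixed case, digits and punctuation pass; lowercase-only fails.
    assert is_strong_password("Xk3#mQ9!") is True
    assert is_strong_password("abcdefgh") is False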
| 2 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None, )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents, parents_chain_index) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the protein's aatype."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features: FeatureDict, result: ModelOutput, b_factors=None, chain_index=None, remark=None, parents=None, parents_chain_index=None, ) -> Protein:
    """Assembles a protein from a prediction."""
    return Protein(
        aatype=features["aatype"], atom_positions=result["final_atom_positions"], atom_mask=result["final_atom_mask"], residue_index=features["residue_index"] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index, )
| 2 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any `ModelForSequenceClassification`."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
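

# In practice this class is reached through the `pipeline` factory rather than
# instantiated directly; a minimal sketch (the checkpoint name is only an example):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     print(classifier("Transformers are awesome!"))              # [{'label': 'POSITIVE', 'score': ...}]
#     print(classifier("Transformers are awesome!", top_k=None))  # one score per label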
| 68 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
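

# Outside the test suite the constraint is stepped token by token during
# generation; a minimal sketch:
#
#     constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#     for token in (1, 2, 4):
#         stepped, completed, reset = constraint.update(token)
#     print(constraint.completed)  # True: [1, 2, 4] satisfies one branch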
| 68 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'''configuration_audio_spectrogram_transformer''': [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ASTConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'''AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ASTForAudioClassification''',
'''ASTModel''',
'''ASTPreTrainedModel''',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 64 |
"""simple docstring"""
def perfect(number: int) -> bool:
    """Check whether `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
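
    # For instance (an illustrative check, not part of the original script),
    # the small perfect numbers can be recovered with a short scan:
    # 6, 28 and 496 are the only perfect numbers below 1000.
    print([n for n in range(2, 1000) if perfect(n)])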
| 220 | 0 |
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")
    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits
    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 359 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
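

# A concrete command only has to fill in the two hooks; the subclass below is a
# hypothetical example, not part of the file:
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the subparsers action handed in by the CLI entry point.
        hello_parser = parser.add_parser("hello")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello")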
| 167 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)

    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'instructblip-vicuna-7b',
'instructblip-vicuna-13b',
'instructblip-flan-t5-xl',
'instructblip-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='instructblip-flan-t5-xl',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 234 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 234 | 1 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(f"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
@require_torch_gpu
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
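# Run note (an assumption based on the usual layout of this example, not stated in the
# file itself): these slow tests are typically invoked with something like
#   RUN_SLOW=1 pytest examples/research_projects/rag/_test_finetune_rag.py
# on a GPU machine, since the decorators above skip them otherwise.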
| 371 |
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
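# For reference, the known answer to Project Euler problem 19 is 171,
# so solution() should print 171.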
| 152 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
"""simple docstring"""
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if inductance < 0:
raise ValueError("Inductance cannot be negative" )
if frequency < 0:
raise ValueError("Frequency cannot be negative" )
if reactance < 0:
raise ValueError("Inductive reactance cannot be negative" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
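# Worked example (values chosen purely for illustration): a 35 mH inductor driven
# at 1 kHz has an inductive reactance of 2 * pi * 1000 * 0.035 ≈ 219.9 ohms, so
#   ind_reactance(35e-3, 1e3, 0)  ->  {"reactance": 219.91...}
# Passing 0 for inductance or frequency instead solves for that quantity.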
| 95 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix @ x = vector by Gaussian elimination
    with partial pivoting, returning x as a column matrix."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Fit a polynomial of degree len(y_points) - 1 through the points
    (1, y_points[0]), (2, y_points[1]), ... and return it as a function."""
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials fitted to
    successively longer prefixes of the sequence (Project Euler 101)."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
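# For reference, the known answer to Project Euler problem 101 is 37076114526,
# which is what solution() should evaluate to with the defaults above.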
| 299 | 0 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image by adding `level` to every channel."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level; PIL's point() clamps
        # the result to the valid 0-255 range for 8-bit images.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
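        # Note: since the transform reduces to c + level, an equivalent one-liner
        # would be img.point(lambda c: c + 100), relying on the same implicit clamping.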
| 352 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Convert an image to its negative by inverting every pixel's color."""
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
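# Performance note: the per-pixel loop above is O(H*W) in pure Python; with the
# NumPy array OpenCV returns, the same result is a single vectorized expression,
#   img = 255 - img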
| 295 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio prompt with the speech model
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
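# Hypothetical usage sketch (the checkpoint id and loading pattern are assumptions,
# not part of this file; community pipelines are usually loaded by name):
#   pipe = SpeechToImagePipeline.from_pretrained("CompVis/stable-diffusion-v1-4", ...)
#   output = pipe(audio_array, sampling_rate=16_000)
#   output.images[0].save("speech_to_image.png")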
| 201 |
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 141 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shape in tensorflow cleanly."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adding a tiny epsilon to the logits keeps the (XLA-compiled) softmax numerically stable.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )
def save_attributes_to_hdf5_group(group, name, data):
    """Saves attributes (data) of the specified name into the HDF5 group,
    chunking the array when it would exceed the HDF5 object header limit."""
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group,
    reassembling chunked attributes saved by `save_attributes_to_hdf5_group`."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data
def expand_1d(data):
    """Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
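# Quick sanity checks (assuming TF eager mode): for a statically shaped tensor
# shape_list returns plain Python ints, e.g.
#   shape_list(tf.ones((2, 3))) == [2, 3]
# and flatten(tf.ones((2, 3, 4)), start_dim=1) yields a tensor of shape (2, 12).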
| 370 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
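# Minimal usage sketch: the defaults reproduce the base architecture, e.g.
#   config = UniSpeechSatConfig()
#   config.hidden_size             # 768
#   config.inputs_to_logits_ratio  # 320, the product of the conv strides
# (the property name follows the Wav2Vec2-style configs this one mirrors).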
| 2 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
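# Run note (the path is an assumption following the usual transformers test layout):
#   pytest tests/models/vision_text_dual_encoder/ -k processor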
| 158 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Transform a snake_case string to camelCase, or PascalCase if `use_pascal` is set."""
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
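# Examples:
#   snake_to_camel_case("some_random_string")                  -> "someRandomString"
#   snake_to_camel_case("some_random_string", use_pascal=True) -> "SomeRandomString"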
| 291 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 367 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether tp freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs the metrics for the given split and saves them as `{split}_results.json`."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 201 | 0 |
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
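# Usage (per the error message above): python fractals.py 5
# draws a depth-5 Sierpinski triangle in a turtle graphics window.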
| 338 | from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
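# Usage sketch (instantiating the two sub-processors is assumed to happen elsewhere):
#   processor = TvltProcessor(image_processor, feature_extractor)
#   batch = processor(images=video_frames, audio=waveform, sampling_rate=44_100)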
| 338 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 23 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
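# Minimal usage sketch: CTRLConfig() reproduces the original 48-layer CTRL model;
# override fields as keyword arguments, e.g. CTRLConfig(n_layer=12, n_embd=512).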
| 23 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)

        return match

    def _no_print_statements(self, filepath: str):
        """Find all instances of `print` used in a dataset script."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)

        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 46 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    '''simple docstring'''
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    '''simple docstring'''
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    '''simple docstring'''
    encoded = encode(input("""-> """).strip().lower())
    print("""Encoded: """, encoded)
    print("""Decoded:""", decode(encoded))


if __name__ == "__main__":
    main()
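# Worked example (illustrative): letters map to 1-26 via their offset from ord("a") = 97.
# encode("abc") == [1, 2, 3] and decode([1, 2, 3]) == "abc", so decode(encode(s)) == s
# for any lowercase alphabetic string s.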
| 46 | 1 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = """docs/source/en/_toctree.yml"""
def clean_model_doc_toc(model_doc):
    """simple docstring"""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''')
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
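# Worked example (illustrative): duplicate "local" entries with one shared title are
# collapsed, and the result is sorted by lowercased title:
# clean_model_doc_toc([
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/albert", "title": "ALBERT"},
# ])  # -> [{"local": "model_doc/albert", ...}, {"local": "model_doc/bert", ...}]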
def check_model_doc(overwrite=False):
    """simple docstring"""
    with open(PATH_TO_TOC, encoding='''utf-8''') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC, '''w''', encoding='''utf-8''') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
    check_model_doc(args.fix_and_overwrite)
| 330 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = 'yolos'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
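# Usage sketch (illustrative): the defaults above mirror the yolos-small checkpoint.
# config = YolosConfig(num_detection_tokens=100)
# assert config.hidden_size == 768 and config.image_size == [512, 864]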
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self) -> float:
        '''simple docstring'''
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 12
| 330 | 1 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
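# Quick sanity check (illustrative): the cosine schedule yields one beta per
# diffusion timestep, each capped at max_beta.
# betas = betas_for_alpha_bar(10)
# assert betas.shape == (10,) and float(betas.max()) <= 0.999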
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    '''simple docstring'''
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", trained_betas=None, clip_sample=True, set_alpha_to_zero=True, steps_offset=0, prediction_type="epsilon", clip_sample_range=1.0, **kwargs):
        if kwargs.get("""set_alpha_to_one""", None) is not None:
            deprecation_message = (
                """The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."""
            )
            deprecate("""set_alpha_to_one""", """1.0.0""", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["""set_alpha_to_one"""]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
                f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
                f""" maximal {self.config.num_train_timesteps} timesteps.""")
        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(self, model_output, timestep, sample, eta=0.0, use_clipped_model_output=False, variance_noise=None, return_dict=True):
        # 1. get the previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps
        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
                """ `v_prediction`""")
        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)
        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
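# Minimal usage sketch (illustrative; `unet` and the starting latent `sample` are
# hypothetical stand-ins for a diffusers UNet and its input):
# scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(50)
# for t in scheduler.timesteps:
#     noise_pred = unet(sample, t).sample
#     sample = scheduler.step(noise_pred, t, sample).prev_sample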
| 229 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24, attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256, activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0, classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                """Configuration for convolutional module is incorrect. """
                """It is required that `len(config.attention_layers)` == `config.num_layers` """
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                """`config.attention_layers` is prepared using `config.attention_types`. """
                """Please verify the value of `config.attention_types` argument.""")

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        '''simple docstring'''
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
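# Worked example (illustrative): the default attention_types value
# [[["global", "local"], 12]] expands to 24 entries, alternating
# "global", "local", ... — one entry per transformer layer.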
def custom_unfold(input, dimension, size, step):
    '''simple docstring'''
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="""floor""") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    '''simple docstring'''
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="""floor""")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="""inputs""")
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_sequence + sequence"""}
        else:
            common_inputs["""attention_mask"""] = {0: """batch""", 1: """sequence"""}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.num_heads

    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
            else:
                import torch

                batch, seqlen = common_inputs["""input_ids"""].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["""past_key_values"""] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["""attention_mask"""] = common_inputs["""attention_mask"""]
        if self.use_past:
            mask_dtype = ordered_inputs["""attention_mask"""].dtype
            ordered_inputs["""attention_mask"""] = torch.cat(
                [ordered_inputs["""attention_mask"""], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
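# Usage sketch (illustrative): the ONNX config wraps a model config and exposes
# the dynamic axes of the exported inputs.
# onnx_config = GPTNeoOnnxConfig(GPTNeoConfig())
# print(onnx_config.inputs)  # OrderedDict with "input_ids" and "attention_mask"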
| 20 | 0 |
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = 'wi_0' in t5x_model['target']['encoder']['layers_0']['mlp']
    if config.model_type == "t5":
        attention_name = 'SelfAttention'
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        attention_name = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        attention_name = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global].')
    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"""layers_{str(layer_index)}"""
        # Self-Attention
        t5x_attention_key = t5x_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        t5x_attention_out = t5x_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        t5x_attention_query = t5x_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        t5x_attention_value = t5x_model['target']['encoder'][layer_name]['attention']['value']['kernel']
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']
        # Layer Normalization
        t5x_attention_layer_norm = t5x_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            t5x_mlp_wi_1 = t5x_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            t5x_mlp_wi = t5x_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']
        t5x_mlp_wo = t5x_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        flax_model_encoder_layer_block = flax_model.params['encoder']['block'][str(layer_index)]['layer']
        flax_model_encoder_layer_block['0'][attention_name]['k']['kernel'] = t5x_attention_key
        flax_model_encoder_layer_block['0'][attention_name]['o']['kernel'] = t5x_attention_out
        flax_model_encoder_layer_block['0'][attention_name]['q']['kernel'] = t5x_attention_query
        flax_model_encoder_layer_block['0'][attention_name]['v']['kernel'] = t5x_attention_value
        flax_model_encoder_layer_block['0']['layer_norm']['weight'] = t5x_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block['0'][attention_name]['global_input_layer_norm']['weight'] = t5x_global_layer_norm
        if split_mlp_wi:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_0']['kernel'] = t5x_mlp_wi_0
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi_1']['kernel'] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block['1']['DenseReluDense']['wi']['kernel'] = t5x_mlp_wi
        flax_model_encoder_layer_block['1']['DenseReluDense']['wo']['kernel'] = t5x_mlp_wo
        flax_model_encoder_layer_block['1']['layer_norm']['weight'] = t5x_mlp_layer_norm
        flax_model.params['encoder']['block'][str(layer_index)]['layer'] = flax_model_encoder_layer_block
    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['encoder']['block']['0']['layer']['0'][attention_name]['relative_attention_bias']['embedding'] = t5x_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        flax_model.params['encoder']['block']['0']['layer']['0'][attention_name]['global_relative_attention_bias']['embedding'] = t5x_encoder_global_rel_embedding
    # Assigning
    t5x_encoder_norm = t5x_model['target']['encoder']['encoder_norm']['scale']
    flax_model.params['encoder']['final_layer_norm']['weight'] = t5x_encoder_norm
    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"""layers_{str(layer_index)}"""
        # Self-Attention
        t5x_attention_key = t5x_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        t5x_attention_out = t5x_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        t5x_attention_query = t5x_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        t5x_attention_value = t5x_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']
        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]
        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module['key']['kernel']
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module['out']['kernel']
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module['query']['kernel']
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module['value']['kernel']
        # Layer Normalization
        t5x_cross_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']
        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            t5x_mlp_wi_1 = t5x_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            t5x_mlp_wi = t5x_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']
        t5x_mlp_wo = t5x_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']
        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']
        # Assigning
        flax_model_decoder_layer_block = flax_model.params['decoder']['block'][str(layer_index)]['layer']
        flax_model_decoder_layer_block['0']['SelfAttention']['k']['kernel'] = t5x_attention_key
        flax_model_decoder_layer_block['0']['SelfAttention']['o']['kernel'] = t5x_attention_out
        flax_model_decoder_layer_block['0']['SelfAttention']['q']['kernel'] = t5x_attention_query
        flax_model_decoder_layer_block['0']['SelfAttention']['v']['kernel'] = t5x_attention_value
        flax_model_decoder_layer_block['0']['layer_norm']['weight'] = t5x_pre_attention_layer_norm
        flax_model_decoder_layer_block['1']['EncDecAttention']['k']['kernel'] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block['1']['EncDecAttention']['o']['kernel'] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block['1']['EncDecAttention']['q']['kernel'] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block['1']['EncDecAttention']['v']['kernel'] = t5x_enc_dec_attention_value
        flax_model_decoder_layer_block['1']['layer_norm']['weight'] = t5x_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_0']['kernel'] = t5x_mlp_wi_0
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi_1']['kernel'] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block['2']['DenseReluDense']['wi']['kernel'] = t5x_mlp_wi
        flax_model_decoder_layer_block['2']['DenseReluDense']['wo']['kernel'] = t5x_mlp_wo
        flax_model_decoder_layer_block['2']['layer_norm']['weight'] = t5x_mlp_layer_norm
        flax_model.params['decoder']['block'][str(layer_index)]['layer'] = flax_model_decoder_layer_block
    # Decoder Normalization
    t5x_decoder_norm = t5x_model['target']['decoder']['decoder_norm']['scale']
    flax_model.params['decoder']['final_layer_norm']['weight'] = t5x_decoder_norm
    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    flax_model.params['decoder']['block']['0']['layer']['0']['SelfAttention']['relative_attention_bias']['embedding'] = t5x_decoder_rel_embedding
    # Token Embeddings
    t5x_token_embeddings = t5x_model['target']['token_embedder']['embedding']
    flax_model.params['shared']['embedding'] = t5x_token_embeddings
    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params['lm_head']['kernel'] = t5x_model['target']['decoder']['logits_dense']['kernel']
    flax_model.save_pretrained(flax_dump_folder_path)
    print('T5X Model was successfully converted!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint."""
)
parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""")
parser.add_argument(
"""--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model."""
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 45 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device('cpu'))
    lightning_model.load_state_dict(ckpt['state_dict'])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
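# Example invocation (illustrative; paths are placeholders):
# python <this_script>.py \
#     --longformer_model longformer-base-4096 \
#     --longformer_question_answering_ckpt_path ./lightning_checkpoint.ckpt \
#     --pytorch_dump_folder_path ./longformer-qa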
| 45 | 1 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder(weights, model):
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a)
for lyr_num, lyr in enumerate(model.encoders):
SCREAMING_SNAKE_CASE : List[Any] = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Tuple = ly_weight["attention"]
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
return model
def load_continuous_encoder(weights, model):
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a)
for lyr_num, lyr in enumerate(model.encoders):
SCREAMING_SNAKE_CASE : Optional[Any] = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE : Union[str, Any] = ly_weight["attention"]
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : int = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
return model
def load_decoder(weights, model):
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"]) , requires_grad=_a)
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T))
for lyr_num, lyr in enumerate(model.decoders):
SCREAMING_SNAKE_CASE : Optional[int] = weights[f"layers_{lyr_num}"]
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[int] = ly_weight["self_attention"]
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : List[Any] = ly_weight["MultiHeadDotProductAttention_0"]
SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
SCREAMING_SNAKE_CASE : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : int = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
SCREAMING_SNAKE_CASE : str = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = TaFilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
    main(args)
| 76 |
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2_048,
"AI-Sweden/gpt-sw3-350m": 2_048,
"AI-Sweden/gpt-sw3-1.6b": 2_048,
"AI-Sweden/gpt-sw3-6.7b": 2_048,
"AI-Sweden/gpt-sw3-20b": 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs=None, **kwargs):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get('''name_or_path''')
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored''')
            name_or_path = '''None'''

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = '''<|endoftext|>''' if eos_token is None else eos_token
        unk_token = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = '''<pad>''' if pad_token is None else pad_token
            bos_token = '''<s>''' if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt : off
        self.whitespaces = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', ''''''}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            F'[{"".join(map(chr, list(range(0, 9)) + list(range(1_1, 3_2)) + list(range(1_2_7, 1_6_0)) + [1_6_0, 1_7_3, 8_2_0_3]))}]')

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        '''simple docstring'''
        text = self.non_printing_characters_re.sub('''''', text)

        # Normalize whitespaces
        text = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize('''NFC''', text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        '''simple docstring'''
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        '''simple docstring'''
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        '''simple docstring'''
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        '''simple docstring'''
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        all_responses = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()]
        prompt = (
            F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(all_responses) + F'{self.bos_token}Bot:'
        )
        return self.encode(text=prompt)
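# Usage sketch (illustrative; the model id comes from the map above, and
# encode_fast/decode_fast are the convenience methods defined in this class):
# tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
# ids = tokenizer.encode_fast("Svenska är kul!")
# print(tokenizer.decode_fast(ids))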
| 209 | 0 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True)

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 357 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers | 171 | 0 |
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """Multi Level Feedback Queue: round robin with a per-queue time slice on
    every queue except the last, which runs first-come-first-served."""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        # total number of queues in this scheduler
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()

    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Names of the finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Total time each process in `queue` spent waiting in ready queues."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Time from arrival to completion for each process in `queue`."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Completion (stop) time of each process in `queue`."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Remaining burst time of each process in `queue`."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Add the time the process sat in the ready queue since it last ran."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """Run every remaining process to completion, in queue order."""
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, the CPU idles
            # until the process arrives
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        """Run one round-robin cycle; unfinished processes go back to the queue."""
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, the CPU idles
            # until the process arrives
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        """Schedule everything: round robin on the upper queues, FCFS on the last."""
        # all queues except the last one run the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
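
# Worked example, hand-traced from the logic above (an illustration, not an
# upstream doctest): with queues [RR slice 17, RR slice 25, FCFS] and processes
# P1..P4 arriving at t=0 with bursts 53/17/68/24, P2 finishes at t=34, P4 at
# t=125, P1 at t=136 and P3 at t=204; the finish sequence is
# ['P2', 'P4', 'P1', 'P3'] and the waiting times for [P1, P2, P3, P4] are
# [83, 17, 94, 101].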
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes: {mlfq.calculate_sequence_of_finish_queue()}")
| 21 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array([0.00039216] * 9)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 2 | 0 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 371 |
"""simple docstring"""
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
lowerCAmelCase__ :list[list[int]] = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(m + 1 )]
for i in range(m + 1 ):
lowerCAmelCase__ :str = 1
for n in range(m + 1 ):
for k in range(1 , _SCREAMING_SNAKE_CASE ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
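
# Small sanity check (hand-computed): partition(4) == 5,
# counting 4, 3+1, 2+2, 2+1+1 and 1+1+1+1.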
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__A = int(input("""Enter a number: """).strip())
print(partition(n))
except ValueError:
print("""Please enter a number.""")
else:
try:
__A = int(sys.argv[1])
print(partition(n))
except ValueError:
print("""Please pass a number.""")
| 254 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 | """simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space
        # before it, so it is lstripped by default.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
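
# Hypothetical usage sketch (checkpoint name taken from the map above; the
# conversational API is the transformers one of this era, not part of this
# module):
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello world")["input_ids"]
#   # build_inputs_with_special_tokens appends eos, so ids[-1] == tokenizer.eos_token_id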
| 172 | 0 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 19 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 19 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
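
    # Illustrative call (a sketch, assuming apply_ocr=True on the image
    # processor so words/boxes come from the OCR step):
    #
    #   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    #   encoding = processor(image, return_tensors="pt")
    #   # -> keys: input_ids, attention_mask, bbox, pixel_values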
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 193 | import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly pull an increasing strand out of the input and
    merge it into the solution list."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
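
# Quick hand trace: strand_sort([4, 3, 5, 1, 2]) first pulls the increasing
# strand [4, 5], merges it into the empty solution, then recurses on [3, 1, 2]
# and so on until everything is merged, yielding [1, 2, 3, 4, 5].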
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 87 | 0 |
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` between km/h, m/s, mph and knot, pivoting through km/h."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)
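
# Example values (hand-checked against the tables above):
# convert_speed(100, "km/h", "m/s") -> 27.778
# convert_speed(100, "km/h", "mph") -> 62.137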
if __name__ == "__main__":
import doctest
doctest.testmod()
| 217 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
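
# Note on the math (a reminder, not extra functionality): e is chosen coprime
# to phi(n) = (p - 1) * (q - 1) and d is its modular inverse, so for any
# message m < n, pow(pow(m, e, n), d, n) == m -- which is why (n, e) encrypts
# and (n, d) decrypts.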
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
| 217 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encodings_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy
                # transforms it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms
                # it into a space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_one_token_text(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Check that an error is raised when the user tries to load a tokenizer
        # saved in the pre-v4.17.0 format.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 16 |
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Area of a triangle from its three sides, using Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area
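
# Worked Heron's-formula example (hand-computed): for sides 5, 12, 13 the
# semi-perimeter is 15 and sqrt(15 * 10 * 3 * 2) == 30.0, matching the right
# triangle's (5 * 12) / 2.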
def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError("area_reg_polygon() only accepts non-negative values as length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f'''Rectangle: {area_rectangle(10, 20) = }''')
print(f'''Square: {area_square(10) = }''')
print(f'''Triangle: {area_triangle(10, 10) = }''')
print(f'''Triangle: {area_triangle_three_sides(5, 12, 13) = }''')
print(f'''Parallelogram: {area_parallelogram(10, 20) = }''')
print(f'''Rhombus: {area_rhombus(10, 20) = }''')
print(f'''Trapezium: {area_trapezium(10, 20, 30) = }''')
print(f'''Circle: {area_circle(20) = }''')
print(f'''Ellipse: {area_ellipse(10, 20) = }''')
print("\nSurface Areas of various geometric shapes: \n")
print(f'''Cube: {surface_area_cube(20) = }''')
print(f'''Cuboid: {surface_area_cuboid(10, 20, 30) = }''')
print(f'''Sphere: {surface_area_sphere(20) = }''')
print(f'''Hemisphere: {surface_area_hemisphere(20) = }''')
print(f'''Cone: {surface_area_cone(10, 20) = }''')
print(f'''Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }''')
print(f'''Cylinder: {surface_area_cylinder(10, 20) = }''')
print(f'''Torus: {surface_area_torus(20, 10) = }''')
print(f'''Equilateral Triangle: {area_reg_polygon(3, 10) = }''')
print(f'''Square: {area_reg_polygon(4, 10) = }''')
    print(f'''Regular Pentagon: {area_reg_polygon(5, 10) = }''')
| 90 | 0 |
UpperCAmelCase__ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCAmelCase__ = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCAmelCase__ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 26 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth first search; returns the set of explored vertices."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
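
# Hand-traced on the graph G below: starting from "A" the loop discovers
# vertices in the order A, B, D, E, F, C, G and returns all seven; reversed()
# keeps discovery in left-to-right adjacency order despite the LIFO stack.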
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 26 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 229 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
"tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
"LxmertEncoder",
"LxmertForPreTraining",
"LxmertForQuestionAnswering",
"LxmertModel",
"LxmertPreTrainedModel",
"LxmertVisualFeatureEncoder",
"LxmertXLayer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
"TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFLxmertForPreTraining",
"TFLxmertMainLayer",
"TFLxmertModel",
"TFLxmertPreTrainedModel",
"TFLxmertVisualFeatureEncoder",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 254 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id , path , revision = None ) ->str:
    """simple docstring"""
    if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
        # old versions of hfh don't url-encode the file path
        path = quote(path )
    return hfh.hf_hub_url(repo_id , path , repo_type='dataset' , revision=revision )
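# Usage sketch (illustrative values only, not from the source): builds a
# dataset-file URL on the Hub, percent-encoding the path itself on hfh < 0.11.0:
#   hf_hub_url("user/my_dataset", "data/train 01.parquet", revision="main")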
| 254 | 1 |
from __future__ import annotations
def generate_sum_of_subsets_soln( nums , max_sum ):
    result : list[list[int]] = []
    path : list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree( nums , max_sum , num_index , path , result , remaining_nums_sum , ):
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
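# For the sample inputs above, the subsets of [3, 34, 4, 12, 5, 2] summing to 9
# should be printed as: [3, 4, 2] [4, 5]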
| 182 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image( image_size , device ):
    img_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
    raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073) , (0.2686_2954, 0.2613_0258, 0.2757_7711) ),
        ] )
    image = transform(raw_image ).unsqueeze(0 ).to(device )
    return image
def rename_key( key ):
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , key )
    if "blocks" in key:
        key = re.sub(R'''blocks''' , '''layers''' , key )
    if "attn" in key:
        key = re.sub(R'''attn''' , '''self_attn''' , key )
    if "norm1" in key:
        key = re.sub(R'''norm1''' , '''layer_norm1''' , key )
    if "norm2" in key:
        key = re.sub(R'''norm2''' , '''layer_norm2''' , key )
    if "encoder.norm" in key:
        key = re.sub(R'''encoder.norm''' , '''post_layernorm''' , key )
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , key )
    if "encoder.pos_embed" in key:
        key = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , key )
    if "encoder.cls_token" in key:
        key = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , key )
    if "self_attn" in key:
        key = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , key )
    return key
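# Illustrative mapping (assumed example key, not from the source):
#   rename_key("visual_encoder.blocks.0.attn.proj.weight")
#   -> "vision_model.encoder.layers.0.self_attn.projection.weight"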
@torch.no_grad()
def convert_blip_checkpoint( pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path )
    else:
        config = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
    hf_model = BlipForConditionalGeneration(config ).eval()
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
    pt_model = blip_decoder(pretrained=model_url , image_size=384 , vit='''base''' )
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict )
    image_size = 384
    image = load_demo_image(image_size=image_size , device='''cpu''' )
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    input_ids = tokenizer(['''a picture of'''] ).input_ids
    out = hf_model.generate(image , input_ids )
    assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
    out = hf_model.generate(image )
    assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
    )
    vqa_model = blip_vqa(pretrained=model_url , image_size=image_size , vit='''base''' )
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config )
    hf_vqa_model.load_state_dict(modified_state_dict )
    question = ['''How many dogs are in this image?''']
    question_input_ids = tokenizer(question , return_tensors='''pt''' ).input_ids
    answer = hf_vqa_model.generate(question_input_ids , image )
    print(tokenizer.decode(answer[0] ) )
    assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
    model_url = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
    itm_model = blip_itm(pretrained=model_url , image_size=image_size , vit='''base''' )
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key )
        renamed_key = rename_key(key )
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config )
    question = ['''A picture of a woman with a dog sitting in a beach''']
    question_input_ids = tokenizer(
        question , return_tensors='''pt''' , padding='''max_length''' , truncation=True , max_length=35 , ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict )
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids , image , use_itm_head=True )
    out = hf_itm_model(question_input_ids , image , use_itm_head=False )
    assert out[0].item() == 0.2110_6874_9427_7954
    assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5698_8453_8650_5127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__UpperCamelCase : int = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
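# Typical invocation (assumed script and output names, for illustration only):
#   python convert_blip_checkpoint.py --pytorch_dump_folder_path ./blip-base-hf
# The VQA and ITM variants are then saved with the '_vqa' / '_itm' suffixes.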
| 182 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ''''''
IMG_DIR = ''''''
OUTPUT_DIR = ''''''
FLIP_TYPE = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    print('Processing...' )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = F'{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'
        cv2.imwrite(F'{file_root}.jpg' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F'Success {index+1}/{len(new_images )} with {file_name}' )
        annos_list = []
        for anno in new_annos[index]:
            obj = F'{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'
            annos_list.append(obj )
        with open(F'{file_root}.txt' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset(label_dir: str , img_dir: str ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(img_list: list , anno_list: list , flip_type: int = 1 ) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
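# Note: annotations are YOLO-style rows (label, x_center, y_center, width, height)
# normalized to [0, 1], so flipping only mirrors the relevant center coordinate
# as center -> 1 - center; width and height stay unchanged.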
def random_chars(number_char: int = 32 ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 363 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = ShapEImg2ImgPipeline
    params = ["""image"""]
    batch_params = ["""image"""]
    required_optional_params = [
        """num_images_per_prompt""",
        """num_inference_steps""",
        """generator""",
        """latents""",
        """guidance_scale""",
        """frame_size""",
        """output_type""",
        """return_dict""",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def renderer_dim( self ):
        return 8
    @property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
    @property
    def dummy_image_processor( self ):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
        return image_processor
    @property
    def dummy_prior( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs )
        return model
    @property
    def dummy_renderer( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'image_processor': image_processor,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': input_image,
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e_img2img( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests ( unittest.TestCase ):
    '''simple docstring'''
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e_img2img( self ):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImg2ImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 38 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Dict = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig ( PretrainedConfig ):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig ( OnnxConfig ):
    @property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests (
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint( self ):
        '''simple docstring'''
        device = 'cpu' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineSlowTests ( unittest.TestCase ):
    def tearDown( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=pndm , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 27 | 1 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'''add_prefix_space''': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(''' ''' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding='''max_length''' if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset ( Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '''.source''' )
        self.tgt_file = Path(data_dir ).joinpath(type_path + '''.target''' )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self ):
        return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('''\n''' )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('''\n''' )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , '''right''' )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , '''right''' )
        source_ids = source_inputs['''input_ids'''].squeeze()
        target_ids = target_inputs['''input_ids'''].squeeze()
        src_mask = source_inputs['''attention_mask'''].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['''input_ids'''] for x in batch] )
        masks = torch.stack([x['''attention_mask'''] for x in batch] )
        target_ids = torch.stack([x['''decoder_input_ids'''] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            '''input_ids''': source_ids,
            '''attention_mask''': source_mask,
            '''decoder_input_ids''': y,
        }
        return batch
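# Minimal usage sketch (assumed layout: {data_dir}/train.source and
# {data_dir}/train.target with one example per line; values illustrative):
#   ds = Seq2SeqDataset(tokenizer, "data_dir", max_source_length=128, max_target_length=32)
#   loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)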
logger = getLogger(__name__)
def flatten_list( summary_ids: List[List] ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path: str ):
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , '''git_log.json''' ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    '''simple docstring'''
    with open(path , '''w''' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
        '''hostname''': str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f: Callable , x: Iterable ):
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save( obj , path ):
    '''simple docstring'''
    with open(path , '''wb''' ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    '''simple docstring'''
    def remove_articles( text ):
        return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , text )
    def white_space_fix( text ):
        return " ".join(text.split() )
    def remove_punc( text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower( text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
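# Worked example (illustrative strings): f1_score("cat sat mat", "cat sat")
# shares two tokens, so precision = 2/3, recall = 2/2 and F1 = 0.8.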
def exact_match_score( prediction: str , ground_truth: str ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns: List[str] , reference_lns: List[str] ):
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    '''simple docstring'''
    return model_prefix.startswith('''rag''' )
def set_extra_model_params( extra_params , hparams , config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['''dropout'''] = '''dropout_rate'''
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('''config doesn\'t have a `{}` attribute'''.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 222 |
'''simple docstring'''
from __future__ import annotations
def ceil_index( v , l , r , key ): # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m # noqa: E741
    return r
def longest_increasing_subsequence_length( v: list[int] ) -> int:
    '''simple docstring'''
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
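# Worked example (assumed input): [2, 5, 3, 7, 11, 8, 10, 13, 6] has the
# increasing subsequence [2, 3, 7, 8, 10, 13], so the function returns 6.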
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 222 | 1 |
"""simple docstring"""
from __future__ import annotations
class IIRFilter :
    '''simple docstring'''
    def __init__(self , order ):
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients(self , a_coeffs , b_coeffs ):
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}"""
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}"""
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process(self , sample ):
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
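# Minimal usage sketch (assumed coefficients, purely illustrative -- real designs
# come from a filter-design routine such as a Butterworth/biquad calculator):
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, -1.1, 0.3], [0.2, 0.4, 0.2])
#   impulse_response = [filt.process(x) for x in (1.0, 0.0, 0.0, 0.0)]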
| 171 |
"""simple docstring"""
from __future__ import annotations
def slowsort( sequence , start = None , end = None ) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence ) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence , start , mid )
    slowsort(sequence , mid + 1 , end )
    if sequence[end] < sequence[mid]:
        sequence[end] , sequence[mid] = sequence[mid] , sequence[end]
    slowsort(sequence , start , end - 1 )
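# Usage sketch (assumed sample data): slowsort sorts in place,
#   seq = [5, 3, 1, 4]; slowsort(seq)  # seq becomes [1, 3, 4, 5]
# It is the deliberately inefficient "multiply and surrender" algorithm.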
if __name__ == "__main__":
from doctest import testmod
testmod()
| 171 | 1 |
'''simple docstring'''
def is_pangram( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(""" """ , """""" )
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26
def is_pangram_faster( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag )
def is_pangram_fastest( input_str: str = "The quick brown fox jumps over the lazy dog" , ) -> bool:
    '''simple docstring'''
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark():
    '''simple docstring'''
    from timeit import timeit
    setup = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
    print(timeit("""is_pangram()""" , setup=setup ) )
    print(timeit("""is_pangram_faster()""" , setup=setup ) )
    print(timeit("""is_pangram_fastest()""" , setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = ["OwlViTFeatureExtractor"]
__A : str = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w( h , w , scale_factor=8 ):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
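# Worked example (illustrative numbers): with the default scale_factor=8,
# get_new_h_w(512, 512) returns ((512 // 64) * 8,) * 2 == (64, 64), i.e. the
# latent grid that the movq decoder later upscales by its factor of 8.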
class KandinskyPipeline ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , text_encoder , tokenizer , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
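    # Scaling by scheduler.init_noise_sigma matches the scheduler's expected
    # starting noise level (1.0 for DDPM/DDIM-style schedulers, larger for
    # Karras-sigma schedulers), so fresh and user-supplied latents behave alike.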
    def _encode_prompt( self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="max_length" , truncation=True , max_length=7_7 , return_attention_mask=True , add_special_tokens=True , return_tensors="pt" , )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt , padding="longest" , return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids , untruncated_ids ):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
        text_input_ids = text_input_ids.to(device )
        text_mask = text_inputs.attention_mask.to(device )
        prompt_embeds , text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids , attention_mask=text_mask )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt , dim=0 )
        text_mask = text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="
                    f" {type(prompt )}.")
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens , padding="max_length" , max_length=7_7 , truncation=True , return_attention_mask=True , add_special_tokens=True , return_tensors="pt" , )
            uncond_text_input_ids = uncond_input.input_ids.to(device )
            uncond_text_mask = uncond_input.attention_mask.to(device )
            negative_prompt_embeds , uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids , attention_mask=uncond_text_mask )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len )
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1 , num_images_per_prompt , 1 )
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , seq_len , -1 )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            text_mask = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        if self.safety_checker is not None:
            _ , hook = cpu_offload_with_hook(self.safety_checker , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook")
                and hasattr(module._hf_hook , "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)  # EXAMPLE_DOC_STRING is defined earlier in this module
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latents
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
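    # Illustrative usage sketch (not part of the original file): the prior pipeline,
    # prompt, and sizes below are assumptions for demonstration only. The Kandinsky
    # text-to-image pipeline consumes image embeddings produced by a separate prior.
    #
    #   prior_output = prior_pipeline("a red cat", guidance_scale=1.0)
    #   images = pipeline(
    #       prompt="a red cat",
    #       image_embeds=prior_output.image_embeds,
    #       negative_image_embeds=prior_output.negative_image_embeds,
    #       height=512,
    #       width=512,
    #       num_inference_steps=100,
    #   ).images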
| 100 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
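# Illustration (assumed usage, not part of this file): with the lazy structure above,
# importing a symbol defers loading the torch-backed modeling code until first access:
#
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection  # cheap at import time
#   model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")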
| 293 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| 120 |
def is_arithmetic_series(series: list) -> bool:
    """Checks whether a list of numbers forms an arithmetic series (constant difference)."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Returns the arithmetic mean (sum divided by count) of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
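    # Illustrative checks (example values assumed, not from the original module):
    assert is_arithmetic_series([2, 4, 6])      # constant difference of 2
    assert not is_arithmetic_series([2, 4, 7])  # the difference changes from 2 to 3
    assert arithmetic_mean([2, 4, 6]) == 4.0    # (2 + 4 + 6) / 3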
| 120 | 1 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    """Download a dataset using the datasets package and save it to src/tgt text files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
print(f'Saved {dataset} dataset to {save_dir}' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
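# Example invocation via python-fire (script name, language pair and paths are illustrative):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# which writes {train,val,test}.source and {train,val,test}.target files under save_dir.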
| 12 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Creates a pair of train/eval `DataLoader`s for GLUE MRPC using the given model's tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
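# Illustrative launches (flags come from the argparse definitions above; paths are assumed):
#   accelerate launch this_script.py --num_epochs 2 --output_dir ./checkpoints
#   accelerate launch this_script.py --resume_from_checkpoint ./checkpoints/epoch_0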
| 12 | 1 |
def binary_recursive(decimal: int) -> str:
    """Converts a non-negative integer to its binary digit string, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validates the input, then prefixes the sign and the '0b' binary marker."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
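    # Illustrative conversions (example inputs assumed, not from the original script):
    assert main("10") == "0b1010"
    assert main("-10") == "-0b1010"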
testmod() | 116 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
] ) | 116 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary, inlining the backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
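# Illustrative construction (values assumed, not from this file): `num_attention_heads`
# is served by the property above, which `attribute_map` aliases to `encoder_attention_heads`.
#
#   config = ConditionalDetrConfig(num_queries=300)
#   assert config.num_attention_heads == config.encoder_attention_heads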
| 80 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` holding the verification modes for downloaded files and dataset splits."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The recorded splits contain splits that were not expected."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some expected splits are missing from the recorded splits."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True):
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # read in 1 MiB chunks so large files don't need to fit in memory
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check whether `dataset_size` is small enough to fit under `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
return False | 76 | 0 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes, edges)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected) == sorted(result)
| 370 |
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main()
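    # Non-interactive round trip (example key and message assumed, not in the original):
    # encrypted = encrypt_message("LION", "Attack at dawn.")
    # assert decrypt_message("LION", encrypted) == "Attack at dawn."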
| 347 | 0 |
"""Calculate the apparent power in a single-phase AC circuit."""
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the apparent power as the product of the voltage and current phasors."""
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
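    # Illustrative call (example phasor values assumed): 100 V and 5 A, both at 0 degrees,
    # gives a purely real apparent power.
    print(apparent_power(100, 5, 0, 0))  # (500+0j)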
| 168 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
"tokenizer_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
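# Illustrative use of the fast tokenizer above (checkpoint name taken from the maps above):
#
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   encoding = tokenizer("Hello world", return_tensors="pt")
#   # input_ids start with <s> (bos) and end with </s> (eos),
#   # per build_inputs_with_special_tokens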
| 168 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
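# Typical invocation (illustrative): `diffusers-cli env` selects EnvironmentCommand, whose
# registered factory becomes `args.func` and returns the service object that `run()` executes.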
if __name__ == "__main__":
main() | 215 |
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="YOLOS does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _A ( ):
"""simple docstring"""
a =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes)) | 215 | 1 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptTest(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Find print statements, ignoring those inside comments, strings and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 54 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key, val):
        self.key = key
        self.val = val
        self.next = None
        self.prev = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self):
        # sentinel head and rear nodes keep add/remove free of edge cases
        self.head = DoubleLinkedListNode(None, None)
        self.rear = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node):
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node):
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    # class variable to map the decorated functions to their respective instance
    decorator_function_to_instance_map = {}

    def __init__(self, capacity):
        self.list = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key):
        return key in self.cache

    def get(self, key):
        if key in self.cache:
            self.hits += 1
            value_node = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key, value):
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size=128):
        def cache_decorator_inner(func) -> Callable[..., U]:
            def cache_decorator_wrapper(*args) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
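    # Usage sketch (illustrative, not part of the original module): memoise a
    # recursive function with a capacity-100 cache via the decorator above.
    @LRUCache.decorator(100)
    def fib(num):
        if num in (1, 2):
            return 1
        return fib(num - 1) + fib(num - 2)

    print(fib(20))  # 6765; repeated calls for the same argument hit the cache
    print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)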
| 82 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
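# Illustrative sketch (not part of this file): _LazyModule builds on the same
# idea as PEP 562's module-level __getattr__ -- an attribute is resolved to a
# submodule import only on first access, so `import transformers` stays cheap.
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 module = importlib.import_module("." + submodule, __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")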
| 312 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1 = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1 = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
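if __name__ == "__main__":
    # Illustrative layout check (not part of the original module), using
    # placeholder ids cls=0 and sep=2 instead of the real BARThez vocabulary:
    cls, sep = [0], [2]
    token_ids_0, token_ids_1 = [10, 11], [20, 21]
    print(cls + token_ids_0 + sep)  # <s> A </s> -> [0, 10, 11, 2]
    print(cls + token_ids_0 + sep + sep + token_ids_1 + sep)  # <s> A </s></s> B </s> -> [0, 10, 11, 2, 2, 20, 21, 2]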
| 312 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 5_12,
'''facebook/dpr-ctx_encoder-multiset-base''': 5_12,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 5_12,
'''facebook/dpr-question_encoder-multiset-base''': 5_12,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 5_12,
'''facebook/dpr-reader-multiset-base''': 5_12,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = R'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          is provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs, ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts ), f"""There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."""
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["""input_ids"""]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans = 1_6, max_answer_length = 6_4, num_spans_per_passage = 4, ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["""input_ids"""]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans, ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
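if __name__ == "__main__":
    # Illustrative sketch (not part of the original module) of the scoring rule
    # in _get_best_spans: score(span) = start_logit[start] + end_logit[end],
    # restricted to spans no longer than max_answer_length.
    start_logits = [0.1, 2.0, 0.3]
    end_logits = [0.2, 0.5, 1.5]
    max_answer_length = 2
    scores = []
    for start, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), start_score + end_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    print(scores[0])  # ((1, 2), 3.5): tokens 1..2 form the best answer span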
| 77 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int):
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
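if __name__ == "__main__":
    # Usage example (illustrative, not part of the original file): items 1 and 2
    # are taken whole, plus two thirds of item 3.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))
    # -> (240.0, [1, 1, 0.6666666666666666])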
if __name__ == "__main__":
import doctest
doctest.testmod() | 112 | 0 |
from collections import namedtuple
lowerCamelCase_ = namedtuple("""from_to""", """from_ to""")
lowerCamelCase_ = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ', '.join(METRIC_CONVERSION) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
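    # Usage example (illustrative, not part of the original file):
    print(volume_conversion(4, "cubicmeter", "litre"))  # 4000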
| 357 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith('head' ):
            key = 'segformer.encoder.' + key
        if key.startswith('backbone' ):
            key = key.replace('backbone' , 'segformer.encoder' )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed' ) + len('patch_embed' )]
            key = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(idx)-1}''' )
        if "norm" in key:
            key = key.replace('norm' , 'layer_norm' )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
            key = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(idx)-1}''' )
        if "layer_norm1" in key:
            key = key.replace('layer_norm1' , 'layer_norm_1' )
        if "layer_norm2" in key:
            key = key.replace('layer_norm2' , 'layer_norm_2' )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block' ) + len('block' )]
            key = key.replace(F'''block{idx}''' , F'''block.{int(idx)-1}''' )
        if "attn.q" in key:
            key = key.replace('attn.q' , 'attention.self.query' )
        if "attn.proj" in key:
            key = key.replace('attn.proj' , 'attention.output.dense' )
        if "attn" in key:
            key = key.replace('attn' , 'attention.self' )
        if "fc1" in key:
            key = key.replace('fc1' , 'dense1' )
        if "fc2" in key:
            key = key.replace('fc2' , 'dense2' )
        if "linear_pred" in key:
            key = key.replace('linear_pred' , 'classifier' )
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv' , 'linear_fuse' )
            key = key.replace('linear_fuse.bn' , 'batch_norm' )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c' ) + len('linear_c' )]
            key = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(idx)-1}''' )
        if key.startswith('head' ):
            key = key.replace('head' , 'classifier' )
        new_state_dict[key] = value
    return new_state_dict
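# Illustrative check of the renaming above (not part of the original script):
#     >>> rename_keys({"attn.proj.weight": 0, "fc1.bias": 1})
#     OrderedDict([('attention.output.dense.weight', 0), ('dense1.bias', 1)])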
def read_in_k_v(config, state_dict):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
            kv_bias = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''segformer.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
lowerCAmelCase_ = SegformerConfig()
lowerCAmelCase_ = False
# set attributes based on model_name
lowerCAmelCase_ = 'huggingface/label-files'
if "segformer" in model_name:
lowerCAmelCase_ = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
lowerCAmelCase_ = 150
lowerCAmelCase_ = 'ade20k-id2label.json'
lowerCAmelCase_ = (1, 150, 128, 128)
elif "city" in model_name:
lowerCAmelCase_ = 19
lowerCAmelCase_ = 'cityscapes-id2label.json'
lowerCAmelCase_ = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowerCAmelCase_ = True
lowerCAmelCase_ = model_name[4:6]
lowerCAmelCase_ = 1_000
lowerCAmelCase_ = 'imagenet-1k-id2label.json'
lowerCAmelCase_ = (1, 1_000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
lowerCAmelCase_ = json.load(open(hf_hub_download(a_ , a_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase_ = {int(a_ ): v for k, v in idalabel.items()}
lowerCAmelCase_ = idalabel
lowerCAmelCase_ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 256
elif size == "b2":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ = [64, 128, 320, 512]
lowerCAmelCase_ = 768
lowerCAmelCase_ = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowerCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=a_ , align=a_ , do_random_crop=a_ )
# prepare image
lowerCAmelCase_ = prepare_img()
lowerCAmelCase_ = image_processor(images=a_ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )
else:
lowerCAmelCase_ = torch.load(a_ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
lowerCAmelCase_ = rename_keys(a_ , encoder_only=a_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(a_ , a_ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ = False
lowerCAmelCase_ = SegformerForImageClassification(a_ )
else:
lowerCAmelCase_ = SegformerForSemanticSegmentation(a_ )
model.load_state_dict(a_ )
model.eval()
# forward pass
lowerCAmelCase_ = model(a_ )
lowerCAmelCase_ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.5_820, -8.7_231, -8.3_215], [-8.0_600, -10.3_529, -10.0_304], [-7.5_208, -9.4_103, -9.6_239]],
[[-12.6_918, -13.8_994, -13.7_137], [-13.3_196, -15.7_523, -15.4_789], [-12.9_343, -14.8_757, -14.9_689]],
[[-11.1_911, -11.9_421, -11.3_243], [-11.3_342, -13.6_839, -13.3_581], [-10.3_909, -12.1_832, -12.4_858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.8_173, -14.3_850, -16.3_128], [-14.5_648, -16.5_804, -18.6_568], [-14.7_223, -15.7_387, -18.4_218]],
[[-15.7_290, -17.9_171, -19.4_423], [-18.3_105, -19.9_448, -21.4_661], [-17.9_296, -18.6_497, -20.7_910]],
[[-15.0_783, -17.0_336, -18.2_789], [-16.8_771, -18.6_870, -20.1_612], [-16.2_454, -17.1_426, -19.5_055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.0_878, -10.2_081, -10.1_891], [-9.3_144, -10.7_941, -10.9_843], [-9.2_294, -10.3_855, -10.5_704]],
[[-12.2_316, -13.9_068, -13.6_102], [-12.9_161, -14.3_702, -14.3_235], [-12.5_233, -13.7_174, -13.7_932]],
[[-14.6_275, -15.2_490, -14.9_727], [-14.3_400, -15.9_687, -16.2_827], [-14.1_484, -15.4_033, -15.8_937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.3_144, -13.2_447, -14.0_802], [-13.3_614, -14.5_816, -15.6_117], [-13.3_340, -14.4_433, -16.2_219]],
[[-19.2_781, -20.4_128, -20.7_506], [-20.6_153, -21.6_566, -22.0_998], [-19.9_800, -21.0_430, -22.1_494]],
[[-18.8_739, -19.7_804, -21.1_834], [-20.1_233, -21.6_765, -23.2_944], [-20.0_315, -21.2_641, -23.6_944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.5_524, -12.0_835, -11.7_348], [-10.5_229, -13.6_446, -14.5_662], [-9.5_842, -12.8_851, -13.9_414]],
[[-15.3_432, -17.5_323, -17.0_818], [-16.3_330, -18.9_255, -19.2_101], [-15.1_340, -17.7_848, -18.3_971]],
[[-12.6_072, -14.9_486, -14.6_631], [-13.7_629, -17.0_907, -17.7_745], [-12.7_899, -16.1_695, -17.1_671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.9_295, -13.4_057, -14.8_106], [-13.3_431, -14.8_179, -15.3_781], [-14.2_836, -15.5_942, -16.1_588]],
[[-11.4_906, -12.8_067, -13.6_564], [-13.1_189, -14.0_500, -14.1_543], [-13.8_748, -14.5_136, -14.8_789]],
[[0.5_374, 0.1_067, -0.4_742], [0.1_141, -0.2_255, -0.7_099], [-0.3_000, -0.5_924, -1.3_105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-7.8_217, -9.8_767, -10.1_717], [-9.4_438, -10.9_058, -11.4_047], [-9.7_939, -12.3_495, -12.1_079]],
[[-7.1_514, -9.5_336, -10.0_860], [-9.7_776, -11.6_822, -11.8_439], [-10.1_411, -12.7_655, -12.8_972]],
[[0.3_021, 0.0_805, -0.2_310], [-0.0_328, -0.1_605, -0.2_714], [-0.1_408, -0.5_477, -0.6_976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-9.4_959, -11.3_087, -11.7_479], [-11.0_025, -12.6_540, -12.3_319], [-11.4_064, -13.0_487, -12.9_905]],
[[-9.8_905, -11.3_084, -12.0_854], [-11.1_726, -12.7_698, -12.9_583], [-11.5_985, -13.3_278, -14.1_774]],
[[0.2_213, 0.0_192, -0.2_466], [-0.1_731, -0.4_213, -0.4_874], [-0.3_126, -0.6_541, -1.1_389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-16.0_976, -16.4_856, -17.3_962], [-16.6_234, -19.0_342, -19.7_685], [-16.0_900, -18.0_661, -19.1_180]],
[[-18.4_750, -18.8_488, -19.5_074], [-19.4_030, -22.1_570, -22.5_977], [-19.1_191, -20.8_486, -22.3_783]],
[[-4.5_178, -5.5_037, -6.5_109], [-5.0_884, -7.2_174, -8.0_334], [-4.4_156, -5.8_117, -7.2_970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-14.2_081, -14.4_732, -14.1_977], [-14.5_867, -16.4_423, -16.6_356], [-13.4_441, -14.9_685, -16.8_696]],
[[-14.4_576, -14.7_073, -15.0_451], [-15.0_816, -17.6_237, -17.9_873], [-14.4_213, -16.0_199, -18.5_992]],
[[-4.7_349, -4.9_588, -5.0_966], [-4.3_210, -6.9_325, -7.2_591], [-3.4_312, -4.7_484, -7.1_917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-11.7_737, -11.9_526, -11.3_273], [-13.6_692, -14.4_574, -13.8_878], [-13.8_937, -14.6_924, -15.9_345]],
[[-14.6_706, -14.5_330, -14.1_306], [-16.1_502, -16.8_180, -16.4_269], [-16.8_338, -17.8_939, -20.1_746]],
[[1.0_491, 0.8_289, 1.0_310], [1.1_044, 0.5_219, 0.8_055], [1.0_899, 0.6_926, 0.5_590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ = torch.tensor(
[
[[-12.5_641, -13.4_777, -13.0_684], [-13.9_587, -15.8_983, -16.6_557], [-13.3_109, -15.7_350, -16.3_141]],
[[-14.7_074, -15.4_352, -14.5_944], [-16.6_353, -18.1_663, -18.6_120], [-15.1_702, -18.0_329, -18.1_547]],
[[-1.7_990, -2.0_951, -1.7_784], [-2.6_397, -3.8_245, -3.9_686], [-1.5_264, -2.8_126, -2.9_316]],
] )
else:
lowerCAmelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , a_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a_ ).mkdir(exist_ok=a_ )
model.save_pretrained(a_ )
image_processor.save_pretrained(a_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 14 | 0 |
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 25 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    """simple docstring"""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer ,num_labels=1000 ,id2label=id2label ,label2id=label2id ,)
    return config
def rename_key(name):
    """simple docstring"""
    if "stem.conv" in name:
        name = name.replace("stem.conv" ,"bit.embedder.convolution" )
    if "blocks" in name:
        name = name.replace("blocks" ,"layers" )
    if "head.fc" in name:
        name = name.replace("head.fc" ,"classifier.1" )
    if name.startswith("norm" ):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
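# Illustrative check (not part of the original script):
#     >>> rename_key("stem.conv.weight")
#     'bit.embedder.convolution.weight'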
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name ,pytorch_dump_folder_path ,push_to_hub=False ):
    """simple docstring"""
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name ,pretrained=True )
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({} ,model=timm_model ) )
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=True ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=True ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image ,return_tensors="pt" ).pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values ,pixel_values )
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits
    print("Logits:" ,logits[0, :3] )
    print("Predicted class:" ,model.config.id2label[logits.argmax(-1 ).item()] )
    timm_logits = timm_model(pixel_values )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits ,outputs.logits ,atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(f'''ybelkada/{model_name}''' )
        processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 312 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
snake_case : Dict = "bert-base-cased"
snake_case : int = "google/pegasus-xsum"
snake_case : Any = [" Sam ate lunch today.", "Sams lunch ingredients."]
snake_case : Union[str, Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
snake_case : int = "patrickvonplaten/t5-tiny-random"
snake_case : List[str] = "sshleifer/bart-tiny-random"
snake_case : Optional[Any] = "sshleifer/tiny-mbart"
snake_case : Dict = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path , data: list ) -> None:
    '''simple docstring'''
    content = "\n".join(data )
    Path(path ).open("w" ).writelines(content )
def make_test_data_dir(tmp_dir: str ) -> str:
    '''simple docstring'''
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class TestAll(TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self , _a ):
__magic_name__ : Dict = AutoTokenizer.from_pretrained(_a )
__magic_name__ : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__magic_name__ : List[Any] = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
__magic_name__ : Optional[int] = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
__magic_name__ : Any = 4
__magic_name__ : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__magic_name__ , __magic_name__ : List[Any] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__magic_name__ : Optional[int] = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , src_lang=_a , tgt_lang=_a , )
__magic_name__ : List[Any] = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_a , _a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__magic_name__ : Optional[int] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self , _a ):
__magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained(_a )
__magic_name__ : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__magic_name__ : Optional[Any] = max(len(tokenizer.encode(_a ) ) for a in ARTICLES )
__magic_name__ : Optional[Any] = max(len(tokenizer.encode(_a ) ) for a in SUMMARIES )
__magic_name__ : Any = 4
__magic_name__ : List[Any] = LegacySeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=20 , max_target_length=_a , )
__magic_name__ : Union[str, Any] = DataLoader(_a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
    def test_pack_dataset(self ):
__magic_name__ : str = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__magic_name__ : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__magic_name__ : List[str] = tmp_dir.joinpath("train.source" ).open().readlines()
__magic_name__ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_a , _a , 128 , _a )
__magic_name__ : Union[str, Any] = {x.name for x in tmp_dir.iterdir()}
__magic_name__ : Union[str, Any] = {x.name for x in save_dir.iterdir()}
__magic_name__ : Optional[int] = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_a ) < len(_a )
assert len(_a ) == 1
assert len(packed_examples[0] ) == sum(len(_a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
    def test_dynamic_batch_size(self ):
if not FAIRSEQ_AVAILABLE:
return
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = self._get_dataset(max_len=64 )
__magic_name__ : Tuple = 64
__magic_name__ : Union[str, Any] = ds.make_dynamic_sampler(_a , required_batch_size_multiple=_a )
__magic_name__ : List[Any] = [len(_a ) for x in batch_sampler]
assert len(set(_a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_a ) == len(_a ) # no dropped or added examples
__magic_name__ : Optional[int] = DataLoader(_a , batch_sampler=_a , collate_fn=ds.collate_fn , num_workers=2 )
__magic_name__ : int = []
__magic_name__ : List[str] = []
for batch in data_loader:
__magic_name__ : str = batch["input_ids"].shape
__magic_name__ : int = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__magic_name__ : Dict = np.product(batch["input_ids"].shape )
num_src_per_batch.append(_a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_a )
assert num_src_per_batch[0] == max(_a )
if failures:
raise AssertionError(f'''too many tokens in {len(_a )} batches''' )
    def test_sortish_sampler_reduces_padding(self ):
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = self._get_dataset(max_len=512 )
__magic_name__ : Optional[Any] = 2
__magic_name__ : Optional[int] = ds.make_sortish_sampler(_a , shuffle=_a )
__magic_name__ : Tuple = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 )
__magic_name__ : Any = DataLoader(_a , batch_size=_a , collate_fn=ds.collate_fn , num_workers=2 , sampler=_a )
__magic_name__ : Optional[int] = tokenizer.pad_token_id
def count_pad_tokens(_a , _a="input_ids" ):
return [batch[k].eq(_a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_a , k="labels" ) ) < sum(count_pad_tokens(_a , k="labels" ) )
assert sum(count_pad_tokens(_a ) ) < sum(count_pad_tokens(_a ) )
assert len(_a ) == len(_a )
    def _get_dataset(self , _a=1_000 , max_len=128 ):
if os.getenv("USE_REAL_DATA" , _a ):
__magic_name__ : Optional[Any] = "examples/seq2seq/wmt_en_ro"
__magic_name__ : int = max_len * 2 * 64
if not Path(_a ).joinpath("train.len" ).exists():
save_len_file(_a , _a )
else:
__magic_name__ : Dict = "examples/seq2seq/test_data/wmt_en_ro"
__magic_name__ : Tuple = max_len * 4
save_len_file(_a , _a )
__magic_name__ : str = AutoTokenizer.from_pretrained(_a )
__magic_name__ : List[Any] = SeqaSeqDataset(
_a , data_dir=_a , type_path="train" , max_source_length=_a , max_target_length=_a , n_obs=_a , )
return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self ):
__magic_name__ , __magic_name__ , __magic_name__ : str = self._get_dataset()
__magic_name__ : int = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=0 , add_extra_examples=_a ) )
__magic_name__ : Union[str, Any] = set(DistributedSortishSampler(_a , 256 , num_replicas=2 , rank=1 , add_extra_examples=_a ) )
assert idsa.intersection(_a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self , _a ):
__magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained(_a , use_fast=_a )
if tok_name == MBART_TINY:
__magic_name__ : int = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
__magic_name__ : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__magic_name__ : Any = SeqaSeqDataset(
_a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
__magic_name__ : Dict = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_a ) == 1 if tok_name == BART_TINY else len(_a ) == 0
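if __name__ == "__main__":
    # Illustrative sketch (not part of the original tests): sorting examples by
    # length before batching -- the idea behind the sortish sampler tested
    # above -- reduces the number of pad tokens per batch.
    lengths = [2, 9, 3, 8]

    def pad_cost(order, batch_size=2):
        batches = [order[i : i + batch_size] for i in range(0, len(order), batch_size)]
        return sum(max(b) * len(b) - sum(b) for b in batches)

    print(pad_cost(lengths))  # 12 pad tokens for batches [2, 9] and [3, 8]
    print(pad_cost(sorted(lengths)))  # 2 pad tokens for batches [2, 3] and [8, 9]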
| 41 |
from scipy.stats import pearsonr
import datasets
snake_case : Tuple = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
snake_case : Dict = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
snake_case : int = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    def _info(self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"] , )
    def _compute(self , predictions , references , return_pvalue=False ):
        if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 41 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    if len(nums) == 0:
        raise ValueError("""find_max() arg is an empty sequence""" )
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("""list index out of range""" )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
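    # Usage example (illustrative, not part of the original file):
    print(find_max([3, 8, 1, 9, 4], 0, 4))  # 9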
| 287 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
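
# Hedged usage sketch (not from the original file): the defaults above describe
# the SegFormer-B0 backbone, so a 4-stage encoder config can be built with
#     config = SegformerConfig()
#     assert config.num_encoder_blocks == len(config.depths) == 4
# Larger variants (b1-b5) mainly change `depths` and `hidden_sizes`.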
| 287 | 1 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-pass biquad filter (audio EQ cookbook)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order band-pass biquad filter (constant skirt gain)."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a

    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)

    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad filter with the given gain in dB."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)

    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
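
# Hedged usage sketch (not part of the original module), assuming IIRFilter
# exposes a per-sample `process()` method as in the companion iir_filter module:
#     filt = make_lowpass(1000, 48000)
#     out = [filt.process(sample) for sample in samples]
# where `samples` is any iterable of floats in the [-1.0, 1.0] range.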
| 61 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = tempfile.mkdtemp()
UpperCAmelCase__ = BlipImageProcessor()
UpperCAmelCase__ = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
UpperCAmelCase__ = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
UpperCAmelCase__ = InstructBlipProcessor(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **_UpperCAmelCase : Tuple ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).tokenizer
def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : List[str] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **_UpperCAmelCase : Dict ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).qformer_tokenizer
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
UpperCAmelCase__ = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase__ = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
UpperCAmelCase__ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = image_processor(_UpperCAmelCase , return_tensors="""np""" )
UpperCAmelCase__ = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = processor(text=_UpperCAmelCase )
UpperCAmelCase__ = tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
UpperCAmelCase__ = qformer_tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase__ = processor.batch_decode(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_image_processor()
UpperCAmelCase__ = self.get_tokenizer()
UpperCAmelCase__ = self.get_qformer_tokenizer()
UpperCAmelCase__ = InstructBlipProcessor(
tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase , qformer_tokenizer=_UpperCAmelCase )
UpperCAmelCase__ = """lower newer"""
UpperCAmelCase__ = self.prepare_image_inputs()
UpperCAmelCase__ = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
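
# Hedged note (not part of the original suite): these tests are meant to be
# collected by pytest, e.g. `python -m pytest <path_to_this_file> -k processor`
# (the path is illustrative); setUp pulls two tiny hub checkpoints, so network
# access is assumed on the first run.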
| 61 | 1 |
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
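
# Hedged sketch (not in the original script): requests.post also accepts a
# timeout, so a more defensive variant of the call above would be
#     requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=10)
# which raises requests.exceptions.Timeout instead of hanging indefinitely.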
| 347 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
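
# Hedged migration sketch (not part of the original file): downstream code
# should switch to the replacement class directly, e.g.
#     from transformers import MobileViTImageProcessor
#     image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
# which accepts the same call signature as the deprecated feature extractor.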
| 347 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
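
# Hedged usage note (not part of the original file): with the _LazyModule in
# place, `from transformers.models.table_transformer import TableTransformerModel`
# only imports the torch-backed module on first attribute access; the package
# path is an assumption based on where this __init__.py normally lives.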
| 371 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
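
# Hedged usage sketch (not from the original file): a multilingual adapter
# config can be built by listing the language codes up front, e.g.
#     config = XmodConfig(languages=["en_XX", "de_DE"], default_language="en_XX")
#     assert config.adapter_reduction_factor == 2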
| 286 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
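
# Hedged usage sketch (not part of the original script): a typical invocation is
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128
# where the flag names mirror fields on TensorFlowBenchmarkArguments; the exact
# script filename is an assumption.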
| 28 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first traversal that records an Eulerian path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path
def check_circuit_or_path(graph, max_node):
    """Return 1 for an Euler circuit, 2 for an Euler path, 3 otherwise."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
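
# Hedged worked example (not in the original file): in g1 above the node
# degrees are (3, 2, 2, 2, 1), so exactly two odd-degree nodes exist and
# check_euler(g1, 10) should print "graph has a Euler path" followed by a path
# such as [5, 4, 1, 2, 3, 1], which traverses each of the five edges once.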
| 199 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a_ :
'''simple docstring'''
def __init__(self, lowerCamelCase_, lowerCamelCase_=2, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=2, lowerCamelCase_=7, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=True, lowerCamelCase_=9_9, lowerCamelCase_=3_6, lowerCamelCase_=2, lowerCamelCase_=4, lowerCamelCase_=3_7, lowerCamelCase_="gelu", lowerCamelCase_=0.1, lowerCamelCase_=0.1, lowerCamelCase_=5_1_2, lowerCamelCase_=1_6, lowerCamelCase_=2, lowerCamelCase_=0.02, lowerCamelCase_=6, lowerCamelCase_=6, lowerCamelCase_=3, lowerCamelCase_=4, lowerCamelCase_=None, lowerCamelCase_=1_0_0_0, ):
'''simple docstring'''
lowerCamelCase__ : Dict = parent
lowerCamelCase__ : Optional[int] = batch_size
lowerCamelCase__ : Dict = num_channels
lowerCamelCase__ : int = image_size
lowerCamelCase__ : Dict = patch_size
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : Any = use_input_mask
lowerCamelCase__ : Dict = use_token_type_ids
lowerCamelCase__ : List[str] = use_labels
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : List[Any] = hidden_size
lowerCamelCase__ : int = num_hidden_layers
lowerCamelCase__ : Optional[int] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : List[Any] = hidden_act
lowerCamelCase__ : Tuple = hidden_dropout_prob
lowerCamelCase__ : int = attention_probs_dropout_prob
lowerCamelCase__ : List[str] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = type_vocab_size
lowerCamelCase__ : Optional[int] = type_sequence_label_size
lowerCamelCase__ : str = initializer_range
lowerCamelCase__ : str = coordinate_size
lowerCamelCase__ : Optional[Any] = shape_size
lowerCamelCase__ : Union[str, Any] = num_labels
lowerCamelCase__ : Optional[Any] = num_choices
lowerCamelCase__ : Dict = scope
lowerCamelCase__ : List[str] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
lowerCamelCase__ : List[Any] = text_seq_length
lowerCamelCase__ : Optional[Any] = (image_size // patch_size) ** 2 + 1
lowerCamelCase__ : Optional[int] = self.text_seq_length + self.image_seq_length
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox )
lowerCamelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowerCamelCase__ : List[str] = bbox[i, j, 3]
lowerCamelCase__ : List[Any] = bbox[i, j, 1]
lowerCamelCase__ : int = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
lowerCamelCase__ : Tuple = bbox[i, j, 2]
lowerCamelCase__ : Optional[int] = bbox[i, j, 0]
lowerCamelCase__ : Tuple = tmp_coordinate
lowerCamelCase__ : Tuple = tf.constant(lowerCamelCase_ )
lowerCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : int = None
if self.use_input_mask:
lowerCamelCase__ : List[Any] = random_attention_mask([self.batch_size, self.text_seq_length] )
lowerCamelCase__ : Dict = None
if self.use_token_type_ids:
lowerCamelCase__ : Tuple = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size )
lowerCamelCase__ : int = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCamelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels )
lowerCamelCase__ : Dict = LayoutLMvaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = TFLayoutLMvaModel(config=lowerCamelCase_ )
# text + image
lowerCamelCase__ : Dict = model(lowerCamelCase_, pixel_values=lowerCamelCase_, training=lowerCamelCase_ )
lowerCamelCase__ : Any = model(
lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, training=lowerCamelCase_, )
lowerCamelCase__ : int = model(lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, training=lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
# text only
lowerCamelCase__ : str = model(lowerCamelCase_, training=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
lowerCamelCase__ : Tuple = model({'pixel_values': pixel_values}, training=lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.num_labels
lowerCamelCase__ : Union[str, Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = model(
lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, training=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[int] = TFLayoutLMvaForTokenClassification(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(
lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, labels=lowerCamelCase_, training=lowerCamelCase_, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels) )
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = 2
lowerCamelCase__ : List[Any] = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(
lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, attention_mask=lowerCamelCase_, token_type_ids=lowerCamelCase_, start_positions=lowerCamelCase_, end_positions=lowerCamelCase_, training=lowerCamelCase_, )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def a__ (self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Tuple = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Optional[Any] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCamelCase__ : Dict = False
lowerCamelCase__ : str = False
lowerCamelCase__ : Any = False
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
return True
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_=False ):
'''simple docstring'''
lowerCamelCase__ : Tuple = copy.deepcopy(lowerCamelCase_ )
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : List[str] = {
k: tf.tile(tf.expand_dims(lowerCamelCase_, 1 ), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase_, tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : Dict = tf.ones(self.model_tester.batch_size, dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : str = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa )
lowerCamelCase__ : Any = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : Any = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase_ ):
lowerCamelCase__ : Dict = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.intaa )
return inputs_dict
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = TFLayoutLMvaModelTester(self )
lowerCamelCase__ : Optional[int] = ConfigTester(self, config_class=lowerCamelCase_, hidden_size=3_7 )
def a__ (self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = model_class(lowerCamelCase_ )
if getattr(lowerCamelCase_, 'hf_compute_loss', lowerCamelCase_ ):
# The number of elements in the loss should be the same as the number of elements in the label
lowerCamelCase__ : Dict = self._prepare_for_class(inputs_dict.copy(), lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=lowerCamelCase_ )[0]
]
lowerCamelCase__ : Union[str, Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
lowerCamelCase__ : int = self._prepare_for_class(inputs_dict.copy(), lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = prepared_for_class.pop('input_ids' )
lowerCamelCase__ : str = model(lowerCamelCase_, **lowerCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
lowerCamelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy(), lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : Dict = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
lowerCamelCase__ : List[str] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
lowerCamelCase__ : int = -1_0_0
lowerCamelCase__ : str = tf.convert_to_tensor(lowerCamelCase_ )
lowerCamelCase__ : Optional[int] = model(lowerCamelCase_, **lowerCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
lowerCamelCase__ : Dict = self._prepare_for_class(inputs_dict.copy(), lowerCamelCase_, return_labels=lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = model(lowerCamelCase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
lowerCamelCase__ : Optional[Any] = self._prepare_for_class(inputs_dict.copy(), lowerCamelCase_, return_labels=lowerCamelCase_ )
# Get keys that were added with the _prepare_for_class function
lowerCamelCase__ : Any = prepared_for_class.keys() - inputs_dict.keys()
lowerCamelCase__ : Union[str, Any] = inspect.signature(model.call ).parameters
lowerCamelCase__ : str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
lowerCamelCase__ : Optional[Any] = {0: 'input_ids'}
for label_key in label_keys:
lowerCamelCase__ : Optional[Any] = signature_names.index(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = label_key
lowerCamelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
lowerCamelCase__ : int = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
lowerCamelCase__ : str = prepared_for_class[value]
lowerCamelCase__ : str = tuple(lowerCamelCase_ )
# Send to model
lowerCamelCase__ : Tuple = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def a__ (self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def a__ (self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
def a__ (self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
def a__ (self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
def a__ (self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
@slow
def a__ (self ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Union[str, Any] = TFLayoutLMvaModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ (self ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase_ ) if is_vision_available() else None
@slow
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
lowerCamelCase__ : str = self.default_image_processor
lowerCamelCase__ : Tuple = prepare_img()
lowerCamelCase__ : Dict = image_processor(images=lowerCamelCase_, return_tensors='tf' ).pixel_values
lowerCamelCase__ : List[str] = tf.constant([[1, 2]] )
lowerCamelCase__ : Tuple = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ), axis=0 )
# forward pass
lowerCamelCase__ : List[str] = model(input_ids=lowerCamelCase_, bbox=lowerCamelCase_, pixel_values=lowerCamelCase_, training=lowerCamelCase_ )
# verify the logits
lowerCamelCase__ : Tuple = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape, lowerCamelCase_ )
lowerCamelCase__ : List[Any] = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], lowerCamelCase_, atol=1e-4 ) )
| 316 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
A_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A_ : str = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
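
# Hedged usage sketch (not part of the original script; file names illustrative):
#     python convert_dalle_to_flava_codebook.py \
#         --checkpoint_path encoder.pkl \
#         --pytorch_dump_folder_path ./flava-image-codebook
# --checkpoint_path may be a local file or a URL; both cases are handled above
# via torch.load and torch.hub.load_state_dict_from_url respectively.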
| 316 | 1 |
from ...configuration_utils import PretrainedConfig
lowercase_ = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class TapasConfig(PretrainedConfig):
    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase_,**lowercase_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_sizes
A__ = initializer_range
A__ = layer_norm_eps
# Fine-tuning task hyperparameters
A__ = positive_label_weight
A__ = num_aggregation_labels
A__ = aggregation_loss_weight
A__ = use_answer_as_supervision
A__ = answer_loss_importance
A__ = use_normalized_answer_loss
A__ = huber_loss_delta
A__ = temperature
A__ = aggregation_temperature
A__ = use_gumbel_for_cells
A__ = use_gumbel_for_aggregation
A__ = average_approximation_function
A__ = cell_selection_preference
A__ = answer_loss_cutoff
A__ = max_num_rows
A__ = max_num_columns
A__ = average_logits_per_cell
A__ = select_one_column
A__ = allow_empty_column_selection
A__ = init_cell_selection_weights_to_zero
A__ = reset_position_index_per_cell
A__ = disable_per_token_loss
# Aggregation hyperparameters
A__ = aggregation_labels
A__ = no_aggregation_label_index
if isinstance(self.aggregation_labels,lowercase_ ):
A__ = {int(lowercase_ ): v for k, v in aggregation_labels.items()}
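
# Hedged usage sketch (not from the original file): a WTQ-style fine-tuning
# setup enables weak supervision and SQL-like aggregation operators, roughly
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
# with the remaining selection thresholds left at their defaults here.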
| 7 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
lowerCAmelCase_ : int = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
# fmt: off
lowerCAmelCase_ : List[str] = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
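# Aside (hedged sketch, not part of the suite above): the slow/fast parity these
# tests assert can be checked directly; the sample sentence is an arbitrary choice.
#
#   from transformers import XLMRobertaTokenizer, XLMRobertaTokenizerFast
#   slow = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   fast = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
#   assert slow.encode("Hello World!") == fast.encode("Hello World!")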
| 224 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__UpperCamelCase : Optional[Any] = "0.12" # assumed parallelism: 8
@require_flax
@is_staging_test
class __magic_name__ ( unittest.TestCase):
@classmethod
def UpperCAmelCase__ ( cls : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : str = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def UpperCAmelCase__ ( cls : List[str] ) -> str:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Dict = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase__ : Union[str, Any] = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
UpperCamelCase__ : int = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCamelCase__ : int = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__ , repo_id='''test-model-flax''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCamelCase__ : Dict = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
UpperCamelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : int = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=F"{key} not identical" )
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase__ : Tuple = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
UpperCamelCase__ : Any = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCamelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase__ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
UpperCamelCase__ : List[str] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
UpperCamelCase__ : Tuple = flatten_dict(unfreeze(model.params ) )
UpperCamelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase__ : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__ , 1E-3 , msg=F"{key} not identical" )
def check_models_equal( model_a , model_b ):
    """simple docstring"""
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        # Any parameter tensor differing beyond the tolerance makes the models unequal.
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCamelCase__ : str = FlaxBertModel(lowerCamelCase__ )
UpperCamelCase__ : Tuple = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) )
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ : int = FlaxBertModel.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__ , lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCamelCase__ : Dict = FlaxBertModel(lowerCamelCase__ )
UpperCamelCase__ : List[str] = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) , max_shard_size='''10KB''' )
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ : List[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Dict = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__ , lowerCamelCase__ ) )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ : str = '''bert'''
UpperCamelCase__ : Union[str, Any] = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ : Dict = FlaxBertModel.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : List[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = '''bert'''
UpperCamelCase__ : Union[str, Any] = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(lowerCamelCase__ ):
UpperCamelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ , subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
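# Aside (hedged usage sketch of the subfolder pattern exercised above): when a
# checkpoint keeps its weights under a nested directory, loading needs the
# `subfolder` argument; without it, from_pretrained raises.
#
#   model = FlaxBertModel.from_pretrained(
#       "hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert"
#   )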
| 352 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    """simple docstring"""
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class __magic_name__ ( __lowerCAmelCase):
def __init__( self : Dict , lowerCamelCase__ : VQModel , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> Tuple:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=lowerCamelCase__ , unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self : int , lowerCamelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , lowerCamelCase__ : Optional[int] = 1 , lowerCamelCase__ : Optional[int] = 100 , lowerCamelCase__ : Optional[float] = 0.0 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , PIL.Image.Image ):
UpperCamelCase__ : int = 1
elif isinstance(lowerCamelCase__ , torch.Tensor ):
UpperCamelCase__ : Dict = image.shape[0]
else:
raise ValueError(F"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase__ )}" )
if isinstance(lowerCamelCase__ , PIL.Image.Image ):
UpperCamelCase__ : Any = preprocess(lowerCamelCase__ )
UpperCamelCase__ , UpperCamelCase__ : Tuple = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCamelCase__ : Any = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCamelCase__ : Union[str, Any] = next(self.unet.parameters() ).dtype
UpperCamelCase__ : Any = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device , dtype=lowerCamelCase__ )
UpperCamelCase__ : Any = image.to(device=self.device , dtype=lowerCamelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
UpperCamelCase__ : str = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ : Dict = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ : Optional[int] = {}
if accepts_eta:
UpperCamelCase__ : Union[str, Any] = eta
for t in self.progress_bar(lowerCamelCase__ ):
# concat latents and low resolution image in the channel dimension.
UpperCamelCase__ : Any = torch.cat([latents, image] , dim=1 )
UpperCamelCase__ : List[str] = self.scheduler.scale_model_input(lowerCamelCase__ , lowerCamelCase__ )
# predict the noise residual
UpperCamelCase__ : Dict = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ : Tuple = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
# decode the image latents with the VQVAE
UpperCamelCase__ : Tuple = self.vqvae.decode(lowerCamelCase__ ).sample
UpperCamelCase__ : Tuple = torch.clamp(lowerCamelCase__ , -1.0 , 1.0 )
UpperCamelCase__ : Any = image / 2 + 0.5
UpperCamelCase__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ : List[str] = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
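# Aside (hedged usage sketch): this pipeline appears to correspond to diffusers'
# LDMSuperResolutionPipeline; the checkpoint id and file path below are
# illustrative assumptions, not taken from the source.
#
#   import PIL.Image
#   from diffusers import LDMSuperResolutionPipeline
#   pipe = LDMSuperResolutionPipeline.from_pretrained(
#       "CompVis/ldm-super-resolution-4x-openimages"
#   )
#   low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]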
| 51 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowerCamelCase ( snake_case__ , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = TransfoXLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def UpperCamelCase ( self ):
"""simple docstring"""
super().setUp()
_UpperCAmelCase = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = '<unk> UNwanted , running'
_UpperCAmelCase = '<unk> unwanted, running'
return input_text, output_text
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=UpperCAmelCase )
_UpperCAmelCase = tokenizer.tokenize('<unk> UNwanted , running' )
self.assertListEqual(UpperCAmelCase , ['<unk>', 'unwanted', ',', 'running'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [0, 4, 8, 7] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = TransfoXLTokenizer(lower_case=UpperCAmelCase )
_UpperCAmelCase = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
_UpperCAmelCase = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
self.assertListEqual(tokenizer.tokenize(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(UpperCAmelCase ) , UpperCAmelCase )
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = len(UpperCAmelCase )
tokenizer.add_tokens(['new1', 'new2'] )
tokenizer.move_added_token('new1' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(UpperCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('new1' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , 'new1' )
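# Aside (hedged sketch of the id shift the last test verifies): move_added_token
# re-inserts an already-added token at a chosen index instead of appending it,
# shifting the ids of existing vocabulary entries.
#
#   tokenizer.add_tokens(["new1", "new2"])  # appended at the end of the vocab
#   tokenizer.move_added_token("new1", 1)   # now tokenizer.encode("new1") == [1]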
| 39 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """simple docstring"""
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr , target )-> tuple[int, ...]:
    """simple docstring"""
    # Naive O(n^3) search over all ordered triples.
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr , target )-> tuple[int, int, int]:
    """simple docstring"""
    # Sorted two-pointer scan: O(n^2) overall.
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times()-> tuple[float, float]:
    """simple docstring"""
    setup_code = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n'
    test_code1 = '\ntriplet_sum1(*dataset)\n'
    test_code2 = '\ntriplet_sum2(*dataset)\n'
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 39 | 1 |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = None
@experimental
def a_ ( __lowercase : int , __lowercase : int , __lowercase : Tuple , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : List[Any] , __lowercase : Optional[int] ) -> Union[str, Any]:
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
return _map_with_joblib(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
def a_ ( __lowercase : int , __lowercase : int , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Any , __lowercase : Union[str, Any] ) -> Tuple:
_snake_case = num_proc if num_proc <= len(__lowercase ) else len(__lowercase )
_snake_case = [] # We organize the splits ourselve (contiguous splits)
for index in range(__lowercase ):
_snake_case = len(__lowercase ) // num_proc
_snake_case = len(__lowercase ) % num_proc
_snake_case = div * index + min(__lowercase , __lowercase )
_snake_case = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(__lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
f'''Error dividing inputs iterable among processes. '''
f'''Total number of objects {len(__lowercase )}, '''
f'''length: {sum(len(i[1] ) for i in split_kwds )}''' )
logger.info(
f'''Spawning {num_proc} processes for {len(__lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' )
_snake_case , _snake_case = None, None
if not disable_tqdm:
_snake_case , _snake_case = (RLock(),), tqdm.set_lock
with Pool(__lowercase , initargs=__lowercase , initializer=__lowercase ) as pool:
_snake_case = pool.map(__lowercase , __lowercase )
logger.info(f'''Finished {num_proc} processes''' )
_snake_case = [obj for proc_res in mapped for obj in proc_res]
logger.info(f'''Unpacked {len(__lowercase )} objects''' )
return mapped
def a_ ( __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Optional[int] , __lowercase : Any , __lowercase : List[Any] , __lowercase : str ) -> List[Any]:
    # Progress bar is not yet supported for _map_with_joblib: tqdm cannot be applied
    # accurately to joblib, and doing so would require monkey-patching joblib
    # internals, which are subject to change.
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__lowercase ):
return joblib.Parallel()(
joblib.delayed(__lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a_ ( __lowercase : str ) -> int:
_snake_case = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
        _snake_case = None
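# Aside (hedged usage sketch; in the datasets library this context manager is
# exposed as `parallel_backend`): inside the block, map-style work is dispatched
# through joblib rather than multiprocessing.Pool. `ds` and `process_fn` are
# assumed names.
#
#   from datasets.parallel import parallel_backend
#   with parallel_backend("spark"):
#       ds = ds.map(process_fn, num_proc=4)
| 130 |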
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = "canine"
def __init__( self : int , lowercase : Optional[int]=768 , lowercase : Tuple=12 , lowercase : Union[str, Any]=12 , lowercase : Optional[int]=3_072 , lowercase : Tuple="gelu" , lowercase : Optional[Any]=0.1 , lowercase : Tuple=0.1 , lowercase : int=16_384 , lowercase : Optional[int]=16 , lowercase : Optional[int]=0.02 , lowercase : Optional[Any]=1E-12 , lowercase : Optional[Any]=0 , lowercase : Dict=0xE000 , lowercase : Optional[Any]=0xE001 , lowercase : Union[str, Any]=4 , lowercase : str=4 , lowercase : Optional[int]=8 , lowercase : List[str]=16_384 , lowercase : Union[str, Any]=128 , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
_snake_case = max_position_embeddings
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = initializer_range
_snake_case = type_vocab_size
_snake_case = layer_norm_eps
# Character config:
_snake_case = downsampling_rate
_snake_case = upsampling_kernel_size
_snake_case = num_hash_functions
_snake_case = num_hash_buckets
        _snake_case = local_transformer_stride
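# Aside (hedged sketch): instantiating this CANINE-style config with the defaults
# defined above; CANINE is vocabulary-free and hashes Unicode code points, hence
# the num_hash_functions / num_hash_buckets fields.
#
#   config = CanineConfig()     # hidden_size=768, num_hash_buckets=16_384
#   config.downsampling_rate    # 4 characters per downsampled position
| 130 | 1 |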
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Optional[int] = '''table-transformer'''
lowerCamelCase : List[str] = ['''past_key_values''']
lowerCamelCase : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : List[str] , UpperCAmelCase__ : str=True , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=3 , UpperCAmelCase__ : Tuple=1_0_0 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : int=2_0_4_8 , UpperCAmelCase__ : List[str]=8 , UpperCAmelCase__ : Union[str, Any]=6 , UpperCAmelCase__ : Tuple=2_0_4_8 , UpperCAmelCase__ : int=8 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : List[Any]="relu" , UpperCAmelCase__ : Dict=2_5_6 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Union[str, Any]=0.0 , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : Union[str, Any]=1.0 , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : List[Any]="sine" , UpperCAmelCase__ : Optional[int]="resnet50" , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Any=False , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : Optional[int]=1 , UpperCAmelCase__ : Dict=5 , UpperCAmelCase__ : List[Any]=2 , UpperCAmelCase__ : List[Any]=0.1 , **UpperCAmelCase__ : Union[str, Any] , ) -> Optional[int]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
lowerCAmelCase = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = backbone_config.get('model_type' )
lowerCAmelCase = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase = config_class.from_dict(UpperCAmelCase__ )
# set timm attributes to None
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None, None, None
lowerCAmelCase = use_timm_backbone
lowerCAmelCase = backbone_config
lowerCAmelCase = num_channels
lowerCAmelCase = num_queries
lowerCAmelCase = d_model
lowerCAmelCase = encoder_ffn_dim
lowerCAmelCase = encoder_layers
lowerCAmelCase = encoder_attention_heads
lowerCAmelCase = decoder_ffn_dim
lowerCAmelCase = decoder_layers
lowerCAmelCase = decoder_attention_heads
lowerCAmelCase = dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = activation_function
lowerCAmelCase = init_std
lowerCAmelCase = init_xavier_std
lowerCAmelCase = encoder_layerdrop
lowerCAmelCase = decoder_layerdrop
lowerCAmelCase = encoder_layers
lowerCAmelCase = auxiliary_loss
lowerCAmelCase = position_embedding_type
lowerCAmelCase = backbone
lowerCAmelCase = use_pretrained_backbone
lowerCAmelCase = dilation
# Hungarian matcher
lowerCAmelCase = class_cost
lowerCAmelCase = bbox_cost
lowerCAmelCase = giou_cost
# Loss coefficients
lowerCAmelCase = mask_loss_coefficient
lowerCAmelCase = dice_loss_coefficient
lowerCAmelCase = bbox_loss_coefficient
lowerCAmelCase = giou_loss_coefficient
lowerCAmelCase = eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : int ) -> int:
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
return self.d_model
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : Dict = version.parse('''1.11''' )
@property
def __UpperCAmelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __UpperCAmelCase ( self : Optional[int] ) -> float:
return 1E-5
@property
def __UpperCAmelCase ( self : Optional[int] ) -> int:
return 1_2
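# Aside (hedged sketch): the OnnxConfig above declares dynamic batch/spatial axes
# for pixel_values and a dynamic batch axis for pixel_mask; an ONNX export would
# use them as its input specification.
#
#   config = TableTransformerConfig(backbone="resnet50", num_queries=100)
#   config.hidden_size  # aliased to d_model via attribute_map, 256 by default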
| 4 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[10, 20, 30, 40] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = parent
_SCREAMING_SNAKE_CASE : Dict = batch_size
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : Any = num_channels
_SCREAMING_SNAKE_CASE : Optional[int] = embeddings_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_sizes
_SCREAMING_SNAKE_CASE : str = depths
_SCREAMING_SNAKE_CASE : str = is_training
_SCREAMING_SNAKE_CASE : Any = use_labels
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : int = num_labels
_SCREAMING_SNAKE_CASE : str = scope
_SCREAMING_SNAKE_CASE : Any = len(__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : Any = self.get_config()
return config, pixel_values
def UpperCAmelCase_ ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : List[str] = FlaxRegNetModel(config=__snake_case )
_SCREAMING_SNAKE_CASE : Optional[int] = model(__snake_case )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Tuple = self.num_labels
_SCREAMING_SNAKE_CASE : List[Any] = FlaxRegNetForImageClassification(config=__snake_case )
_SCREAMING_SNAKE_CASE : Tuple = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = config_and_inputs
_SCREAMING_SNAKE_CASE : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowercase__ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
A_ : Union[str, Any] = False
A_ : List[str] = False
A_ : str = False
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : str = FlaxRegNetModelTester(self )
_SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def UpperCAmelCase_ ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ):
return
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def UpperCAmelCase_ ( self ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def UpperCAmelCase_ ( self ):
pass
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : List[Any] = model_class(__snake_case )
_SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : int = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def UpperCAmelCase_ ( self ):
def check_hidden_states_output(__snake_case , __snake_case , __snake_case ):
_SCREAMING_SNAKE_CASE : Any = model_class(__snake_case )
_SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(__snake_case , __snake_case ) )
_SCREAMING_SNAKE_CASE : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(__snake_case , __snake_case )
_SCREAMING_SNAKE_CASE : str = model_class(__snake_case )
@jax.jit
def model_jitted(__snake_case , **__snake_case ):
return model(pixel_values=__snake_case , **__snake_case )
with self.subTest("""JIT Enabled""" ):
_SCREAMING_SNAKE_CASE : Any = model_jitted(**__snake_case ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_SCREAMING_SNAKE_CASE : Optional[int] = model_jitted(**__snake_case ).to_tuple()
self.assertEqual(len(__snake_case ) , len(__snake_case ) )
for jitted_output, output in zip(__snake_case , __snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case_ ( ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase_ ( self ):
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self ):
_SCREAMING_SNAKE_CASE : Any = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
_SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor
_SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
_SCREAMING_SNAKE_CASE : Tuple = image_processor(images=__snake_case , return_tensors="""np""" )
_SCREAMING_SNAKE_CASE : str = model(**__snake_case )
# verify the logits
_SCREAMING_SNAKE_CASE : Tuple = (1, 1000)
self.assertEqual(outputs.logits.shape , __snake_case )
_SCREAMING_SNAKE_CASE : List[Any] = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
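# Aside (hedged sketch of the integration pattern above): preprocess with the
# checkpoint's image processor, run the Flax model on NumPy tensors, and compare
# a logits slice against pinned reference values.
#
#   inputs = image_processor(images=prepare_img(), return_tensors="np")
#   logits = model(**inputs).logits  # shape (1, 1000) for the ImageNet-1k head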
| 200 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
UpperCamelCase : Optional[int] = get_logger(__name__)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
a : Dict = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('__'):
setattr(self , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_))
a : str = module._original_module if isinstance(UpperCAmelCase_ , _PatchedModuleObj) else module
class UpperCamelCase :
"""simple docstring"""
A : List[str] = []
def __init__( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple=None):
"""simple docstring"""
a : Union[str, Any] = obj
a : Dict = target
a : List[Any] = new
a : List[Any] = target.split('.')[0]
a : Dict = {}
a : Union[str, Any] = attrs or []
def __enter__( self : str):
"""simple docstring"""
*a , a : Optional[int] = self.target.split('.')
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(UpperCAmelCase_)):
try:
a : List[Any] = import_module('.'.join(submodules[: i + 1]))
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
a : int = getattr(self.obj , UpperCAmelCase_)
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(UpperCAmelCase_ , _PatchedModuleObj) and obj_attr._original_module is submodule)
):
a : List[str] = obj_attr
# patch at top level
setattr(self.obj , UpperCAmelCase_ , _PatchedModuleObj(UpperCAmelCase_ , attrs=self.attrs))
a : Union[str, Any] = getattr(self.obj , UpperCAmelCase_)
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , _PatchedModuleObj(getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) , attrs=self.attrs))
a : Optional[int] = getattr(UpperCAmelCase_ , UpperCAmelCase_)
# finally set the target attribute
setattr(UpperCAmelCase_ , UpperCAmelCase_ , self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
a : Tuple = getattr(import_module('.'.join(UpperCAmelCase_)) , UpperCAmelCase_)
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , UpperCAmelCase_) is attr_value:
a : List[Any] = getattr(self.obj , UpperCAmelCase_)
setattr(self.obj , UpperCAmelCase_ , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
a : Any = globals()['__builtins__'][target_attr]
setattr(self.obj , UpperCAmelCase_ , self.new)
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""")
def __exit__( self : Optional[int] , *UpperCAmelCase_ : List[str]):
"""simple docstring"""
for attr in list(self.original):
setattr(self.obj , UpperCAmelCase_ , self.original.pop(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
try:
self._active_patches.remove(self)
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
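# Aside (hedged usage sketch; in the datasets library this patcher is exposed as
# `patch_submodule`): swap a dotted attribute inside a module for the duration of
# a with-block. `some_module` and `mock_join` are assumed names.
#
#   with patch_submodule(some_module, "os.path.join", mock_join):
#       some_module.build_path("a", "b")  # resolves through mock_join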
| 345 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson( func : str , a : float | Decimal , precision : float = 10**-10 ) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) )  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision:  # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find the value of e (the root of log(x) - 1 = 0)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
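# Worked check (hedged): one Newton step for f(x) = x**2 - 5*x + 2 from x0 = 0.4:
#   x1 = x0 - f(x0) / f'(x0) = 0.4 - 0.16 / (-4.2) ≈ 0.43810,
# already close to the exact root (5 - sqrt(17)) / 2 ≈ 0.43845.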
| 345 | 1 |