import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
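# Usage sketch for the pipeline under test. The tiny random checkpoint is the
# same one the tests above load; it produces throwaway text, so substitute a
# real seq2seq model (e.g. "t5-small") to get meaningful generations.
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
print(generator("Something there", do_sample=False))  # [{'generated_text': ...}]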
def is_power_of_two(number: int) -> bool:
    """Return True if `number` (a non-negative integer) is a power of two."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
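# The bit trick above works because a power of two has exactly one set bit,
# so subtracting 1 flips every bit below it and the AND comes out zero:
assert is_power_of_two(8) is True    # 0b1000 & 0b0111 == 0
assert is_power_of_two(6) is False   # 0b0110 & 0b0101 == 0b0100 != 0
assert is_power_of_two(0) is True    # edge case: 0 & -1 == 0, so 0 passes too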
"""simple docstring"""
import enum
import shutil
import sys
_A , _A = shutil.get_terminal_size()
_A = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _lowerCamelCase ( enum.Enum ):
_lowerCamelCase :Tuple = 0
_lowerCamelCase :Any = 1
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="" ) -> int:
sys.stdout.write(str(snake_case_ ) + end )
sys.stdout.flush()
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase="" ) -> Optional[Any]:
forceWrite(f"""\u001b[{color}m{content}\u001b[0m""" , snake_case_ )
def lowercase_ ( ) -> str:
forceWrite("""\r""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
forceWrite(f"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def lowercase_ ( ) -> str:
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def lowercase_ ( ) -> int:
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 242 |
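# Usage sketch for the helpers above (assumes an ANSI-capable terminal):
forceWrite("rendering a menu...", end="\n")
writeColor("-> highlighted row", 32, "\n")  # 32 is the ANSI code for green
move_cursor(1, "UP")  # jump back up one line
clear_line()          # blank it out, leaving the cursor at column 0
linebreak()           # draw a full-width horizontal rule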
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
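# Illustration of the key renaming above (parameter names here are made-up
# examples following the ParlAI layout, not keys read from a real checkpoint):
assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert (
    rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
    == "encoder.layers.0.self_attn.q_proj.weight"
)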
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an FNet model."""

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
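# Usage sketch: instantiating the config with non-default sizes. Unset fields
# keep the defaults listed in __init__ above.
config = FNetConfig(num_hidden_layers=6, hidden_size=512)
print(config.model_type)                     # "fnet"
print(config.use_tpu_fourier_optimizations)  # False unless explicitly enabled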
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Write the dataset to SQL in batches; the caller owns the connection."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
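# Usage sketch for the reader/writer pair above (the table name and sqlite URI
# are illustrative); Dataset.to_sql goes through SqlDatasetWriter internally.
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
ds.to_sql("my_table", "sqlite:///data.db")
round_trip = SqlDatasetReader("SELECT * FROM my_table", "sqlite:///data.db").read()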
"""simple docstring"""
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = len(snake_case_ )
UpperCAmelCase_ : int = len(snake_case_ )
UpperCAmelCase_ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
UpperCAmelCase_ : list = []
for char_count in range(snake_case_ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(snake_case_ )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 61 |
import collections
import os
from typing import List, Optional, Tuple

from transformers.utils import is_jieba_available, requires_backends


if is_jieba_available():
    import jieba

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1_024,
}


def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedily split `token` into the longest substrings found in the vocab."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    """Construct a CPM-Ant tokenizer (jieba word segmentation followed by WordPiece)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba word segmentation, then WordPiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and bos/eos ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prepend the bos token to each sequence."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mark special tokens with 1 and sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
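# Sketch of the greedy longest-match behaviour of WordpieceTokenizer above;
# the toy vocabulary is made up for illustration.
toy_vocab = {"play": 0, "playing": 1, "ing": 2, "<unk>": 3}
tok = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
assert tok.tokenize("playing") == ["playing"]      # longest match wins
assert tok.tokenize("plays") == ["play", "<unk>"]  # "s" falls back to <unk>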
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series "1", "1/2", ..., "1/n" up to the nth term."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
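# The series comes back as strings, one per term:
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]
assert harmonic_series("") == []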
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)


def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
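# Usage sketch: resolving the right feature extractor from a checkpoint name
# (the checkpoint shown maps to the wav2vec2 entry in the table above).
from transformers import AutoFeatureExtractor

feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(feature_extractor).__name__)  # Wav2Vec2FeatureExtractor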
import unittest

import numpy as np


def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None) -> np.ndarray:
    """Compute the Schur complement C - B^T A^-1 B of the block matrix [[A, B], [B^T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )

    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
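# A worked check of the identity exercised by the first test above: for the
# symmetric block matrix M = [[A, B], [B^T, C]] with invertible A, the Schur
# complement S = C - B^T A^-1 B satisfies det(M) = det(A) * det(S).
# The tiny 1x1 blocks below are made up for illustration.
import numpy as np

a = np.array([[2.0]])          # A block
b = np.array([[1.0]])          # B block
c = np.array([[3.0]])          # C block
s = schur_complement(a, b, c)  # S = 3 - 1 * (1/2) * 1 = 2.5
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))  # 5.0 == 2 * 2.5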
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def gnome_sort(lst: list) -> list:
    """Sort a list in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
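# The sort mutates its argument and returns the same list object:
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []
assert gnome_sort(["b", "a"]) == ["a", "b"]  # any mutually comparable items work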
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the rectified linear unit elementwise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    flip_channel_order,
    get_resize_output_image_size,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BILINEAR, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format=None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into per-image semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
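# Usage sketch for preprocessing and post-processing (the file name is
# illustrative; any MobileViT segmentation checkpoint follows this shape):
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-small")
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")

image = Image.open("scene.png")
inputs = image_processor(images=image, return_tensors="pt")
outputs = model(**inputs)
# one (height, width) map of class indices per input image; PIL's size is
# (width, height), hence the reversal
seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])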
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, sorted by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__( UpperCamelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ['''image_processor''', '''tokenizer''']
UpperCAmelCase_ : Tuple = '''LayoutLMv3ImageProcessor'''
UpperCAmelCase_ : Optional[Any] = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""")
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""")
        super().__init__(image_processor , tokenizer)
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""")
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str):
                text = [text] # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("""pixel_values""")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["""overflow_to_sample_mapping"""])
        encoded_inputs["""pixel_values"""] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs)
@property
    def model_input_names( self):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
    def feature_extractor_class( self):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _a , )
return self.image_processor_class
@property
    def feature_extractor( self):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _a , )
return self.image_processor
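# A hedged usage sketch for the processor above, wrapped in a function so the
# module stays import-safe. The checkpoint name and image path are assumptions
# for illustration; with apply_ocr=True (the image processor default), words and
# boxes come from OCR and must not be passed in explicitly.
def _demo_layoutlmva_processor():
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open("document.png").convert("RGB")  # placeholder path
    encoding = processor(image, return_tensors="pt")
    # OCR words/boxes and pixel values are merged into a single encoding
    print(sorted(encoding.keys()))  # input_ids, bbox, attention_mask, pixel_values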
| 272 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
def a__ ( self ) -> Any:
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !"
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
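# The test above follows a common integration-test pattern: compare a small,
# fixed slice of the output tensor against hard-coded reference values with a
# loose absolute tolerance, so the check survives minor numerical drift across
# hardware and library versions. A minimal sketch of that pattern (the helper
# name is an assumption):
import numpy as np


def assert_close_slice(actual, expected, atol=1e-4):
    actual = np.asarray(actual, dtype=np.float32)
    expected = np.asarray(expected, dtype=np.float32)
    diff = np.abs(actual - expected).max()
    if diff > atol:
        raise AssertionError(f"max abs diff {diff:.2e} exceeds atol={atol}")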
| 26 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
UpperCAmelCase__ : Optional[Any] = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
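# A hypothetical invocation of the converter above (all paths and the script
# file name are placeholders, not confirmed by this source):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim \
#       --extract_ema \
#       --dump_path ./stable-diffusion-v1-5-diffusers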
| 121 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class lowercase ( DiffusionPipeline ):
    def __init__( self , prior , image_encoder , image_processor , scheduler , renderer , ):
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(F'''cuda:{gpu_id}''' )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
    def _execution_device( self ):
if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ):
return self.device
for module in self.image_encoder.modules():
if (
                hasattr(module , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    def _encode_image( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )["""last_hidden_state"""]
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(_snake_case )
    def __call__( self , image , num_images_per_prompt = 1 , num_inference_steps = 25 , generator = None , latents = None , guidance_scale = 4.0 , frame_size = 64 , output_type = "pil" , return_dict = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}''' )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred , _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )
        images = []
        for i, latent in enumerate(latents ):
            image = self.renderer.decode(
                latent[None, :] , device , size=frame_size , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
            images.append(image )
        images = torch.stack(images )
        if output_type not in ["np", "pil"]:
            raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]
        # Offload last model to CPU
        if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images )
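# The guidance step inside the denoising loop above is standard classifier-free
# guidance: the latents are duplicated (unconditional + conditional), the model
# output is split back in two, and the two predictions are blended. A minimal
# tensor-level sketch of that arithmetic (the function name is an assumption):
import torch


def apply_classifier_free_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks [unconditional, conditional] along the batch dimension
    noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2)
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)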
| 26 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class A_ ( unittest.TestCase ):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def _lowerCAmelCase (self :Optional[Any] )-> List[str]:
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def _lowerCAmelCase (self , device , seed=0 )-> Tuple:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def _lowerCAmelCase (self :List[Any] )-> List[Any]:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
        expected_slice_depth = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2
    def _lowerCAmelCase (self :str )-> List[str]:
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_a , depth_slice_a = output.rgb, output.depth
        rgb_slice_a = rgb_slice_a[0, -3:, -3:, -1]
        depth_slice_a = depth_slice_a[0, -3:, -1]
        inputs = self.get_dummy_inputs(torch_device )
        prompt = 3 * [inputs.pop('''prompt''' )]
        text_inputs = ldmad_pipe.tokenizer(
            prompt , padding='''max_length''' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=True , return_tensors='''pt''' , )
        text_inputs = text_inputs["""input_ids"""].to(torch_device )
        prompt_embeds = ldmad_pipe.text_encoder(text_inputs )[0]
        inputs['''prompt_embeds'''] = prompt_embeds
        # forward
        output = ldmad_pipe(**inputs )
        rgb_slice_b , depth_slice_b = output.rgb, output.depth
        rgb_slice_b = rgb_slice_b[0, -3:, -3:, -1]
        depth_slice_b = depth_slice_b[0, -3:, -1]
        assert np.abs(rgb_slice_a.flatten() - rgb_slice_b.flatten() ).max() < 1e-4
        assert np.abs(depth_slice_a.flatten() - depth_slice_b.flatten() ).max() < 1e-4
    def _lowerCAmelCase (self :Optional[int] )-> Tuple:
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = PNDMScheduler(skip_prk_steps=True )
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components )
        ldmad_pipe = ldmad_pipe.to(device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        negative_prompt = """french fries"""
        output = ldmad_pipe(**inputs , negative_prompt=negative_prompt )
        rgb , depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
        expected_slice_depth = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def _lowerCAmelCase (self :Any )-> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _lowerCAmelCase (self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 )-> Any:
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
def _lowerCAmelCase (self :int )-> str:
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' )
        ldmad_pipe = ldmad_pipe.to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
        expected_slice_depth = np.array(
            [0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3e-3
@nightly
@require_torch_gpu
class A_ ( unittest.TestCase ):
def _lowerCAmelCase (self :int )-> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _lowerCAmelCase (self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 )-> Any:
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 50,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
def _lowerCAmelCase (self :Optional[Any] )-> Dict:
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d''' ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        expected_rgb_mean = 0.4_9_5_5_8_6
        expected_rgb_std = 0.3_3_7_9_5_5_1_5
        expected_depth_mean = 1_1_2.4_8_5_1_8
        expected_depth_std = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
def _lowerCAmelCase (self :List[Any] )-> Optional[int]:
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained('''Intel/ldm3d-4c''' ).to(torch_device )
        ldmad_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        output = ldmad_pipe(**inputs )
        rgb , depth = output.rgb, output.depth
        expected_rgb_mean = 0.4_1_9_4_1_2_7
        expected_rgb_std = 0.3_5_3_7_5_5_8_6
        expected_depth_mean = 0.5_6_3_8_5_0_2
        expected_depth_std = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3
assert np.abs(expected_depth_std - depth.std() ) < 1e-3
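# The get_inputs helpers above pin every source of randomness so the expected
# slices stay stable: the torch.Generator is seeded per device and the initial
# latents come from a seeded NumPy RandomState. A minimal sketch of that
# determinism recipe (the shape and default seed are illustrative):
import numpy as np
import torch


def seeded_latents(seed=0, shape=(1, 4, 64, 64), device="cpu", dtype=torch.float32):
    generator = torch.Generator(device=device).manual_seed(seed)
    latents = np.random.RandomState(seed).standard_normal(shape)
    return torch.from_numpy(latents).to(device=device, dtype=dtype), generator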
| 117 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def load_orig_config_file( snake_case_ ):
print("""Loading config file...""" )
    def flatten_yaml_as_dict(d,parent_key="",sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v,collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v,new_key,sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(snake_case_,"""r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file,Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config,k,v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(snake_case_,str(exc ) ) )
    return config
def get_mobilevitva_config( task_name,orig_cfg_file ):
_A : Optional[Any] = MobileViTVaConfig()
_A : Tuple = False
# dataset
if task_name.startswith("""imagenet1k_""" ):
_A : Dict = 1000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
_A : int = 384
else:
_A : int = 256
_A : List[str] = """imagenet-1k-id2label.json"""
elif task_name.startswith("""imagenet21k_to_1k_""" ):
_A : Union[str, Any] = 21000
if int(task_name.strip().split("""_""" )[-1] ) == 384:
_A : str = 384
else:
_A : List[Any] = 256
_A : List[str] = """imagenet-22k-id2label.json"""
elif task_name.startswith("""ade20k_""" ):
_A : int = 151
_A : int = 512
_A : Optional[int] = """ade20k-id2label.json"""
_A : Any = True
elif task_name.startswith("""voc_""" ):
_A : List[Any] = 21
_A : Dict = 512
_A : Dict = """pascal-voc-id2label.json"""
_A : int = True
# orig_config
_A : Any = load_orig_config_file(snake_case_ )
assert getattr(snake_case_,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model"
_A : List[Any] = getattr(snake_case_,"""model.classification.mitv2.width_multiplier""",1.0 )
assert (
getattr(snake_case_,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
_A : str = getattr(snake_case_,"""model.classification.activation.name""","""swish""" )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
_A : Optional[int] = getattr(snake_case_,"""model.segmentation.output_stride""",16 )
if "_deeplabv3" in task_name:
_A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] )
_A : int = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_out_channels""",512 )
_A : str = getattr(snake_case_,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 )
# id2label
_A : List[Any] = """huggingface/label-files"""
_A : List[Any] = json.load(open(hf_hub_download(snake_case_,snake_case_,repo_type="""dataset""" ),"""r""" ) )
    _A : str = {int(k ): v for k, v in idalabel.items()}
_A : str = idalabel
_A : Dict = {v: k for k, v in idalabel.items()}
return config
def rename_key( dct,old,new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict,base_model=False ):
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""",""".""" )
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""",""".convolution.""" )
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""",""".normalization.""" )
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" )
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""","""attention.""" )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" )
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""","""classifier.""" )
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""","""segmentation_head.""" )
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""",""".""" )
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""",""".""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys( state_dict ):
    keys_to_ignore = []
for k in state_dict.keys():
if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k,None )
def prepare_img( ):
_A : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
_A : List[Any] = Image.open(requests.get(snake_case_,stream=snake_case_ ).raw )
return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name,checkpoint_path,orig_config_path,pytorch_dump_folder_path ):
    config = get_mobilevitva_config(task_name,orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path,map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
_A : Optional[Any] = MobileViTVaForSemanticSegmentation(snake_case_ ).eval()
_A : str = False
else:
_A : int = MobileViTVaForImageClassification(snake_case_ ).eval()
_A : List[Any] = False
# remove and rename some keys of load the original model
_A : List[Any] = checkpoint
remove_unused_keys(snake_case_ )
_A : Optional[Any] = create_rename_keys(snake_case_,base_model=snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_,snake_case_,snake_case_ )
# load modified state_dict
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img(),return_tensors="""pt""" )
    outputs = model(**encoding )
# verify classification model
if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""",model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
            assert torch.allclose(logits[0, :3],expected_logits,atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
_snake_case = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
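# The conversion above is, at its core, a key-renaming pass over a raw
# state_dict: build (old_key, new_key) pairs, then move each tensor under its
# new name. A minimal generic sketch of that mechanism (the regex rules shown
# are illustrative, not the actual MobileViTV2 mapping):
import re


def remap_state_dict(state_dict, rules):
    """rules: iterable of (pattern, replacement) applied to every key in order."""
    remapped = {}
    for key, tensor in state_dict.items():
        new_key = key
        for pattern, replacement in rules:
            new_key = re.sub(pattern, replacement, new_key)
        remapped[new_key] = tensor
    return remapped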
| 26 | 0 |
"""simple docstring"""
import os
from collections.abc import Iterator
def lowercase_ ( __UpperCAmelCase = "." ) -> Optional[int]:
for dir_path, dir_names, filenames in os.walk(snake_case_ ):
lowerCAmelCase__ : Any = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(snake_case_ )[1] in (".py", ".ipynb"):
yield os.path.join(snake_case_ , snake_case_ ).lstrip("""./""" )
def lowercase_ ( __UpperCAmelCase ) -> List[str]:
return f"""{i * ' '}*""" if i else "\n##"
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowerCAmelCase__ : Any = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(snake_case_ ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(snake_case_ )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def lowercase_ ( __UpperCAmelCase = "." ) -> Optional[int]:
lowerCAmelCase__ : Optional[int] = """"""
for filepath in sorted(good_file_paths(snake_case_ ) ):
lowerCAmelCase__ : int = os.path.split(snake_case_ )
if filepath != old_path:
lowerCAmelCase__ : List[str] = print_path(snake_case_ , snake_case_ )
lowerCAmelCase__ : List[Any] = (filepath.count(os.sep ) + 1) if filepath else 0
lowerCAmelCase__ : int = f"""{filepath}/{filename}""".replace(""" """ , """%20""" )
lowerCAmelCase__ : Dict = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f"""{md_prefix(snake_case_ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md(""".""")
| 242 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs )
        return config
def a__ ( self ) -> Tuple:
for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def a__ ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def a__ ( self ) -> Any:
for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
def a__ ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def a__ ( self ) -> Optional[int]:
_A : Any = self.scheduler_classes[0]
_A : List[str] = self.get_scheduler_config()
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Dict = self.dummy_model()
_A : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Dict = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : str = model(_a , _a )
_A : List[Any] = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Dict = torch.sum(torch.abs(_a ) )
_A : Dict = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Optional[Any]:
_A : Dict = self.scheduler_classes[0]
_A : Optional[int] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_A : Optional[Any] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
_A : Tuple = self.dummy_model()
_A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
_A : Tuple = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
_A : int = scheduler.scale_model_input(_a , _a )
_A : Tuple = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Optional[int] = output.prev_sample
_A : Optional[Any] = torch.sum(torch.abs(_a ) )
_A : List[Any] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
def a__ ( self ) -> List[str]:
_A : Union[str, Any] = self.scheduler_classes[0]
_A : List[Any] = self.get_scheduler_config()
_A : List[str] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Union[str, Any] = self.dummy_model()
_A : Optional[Any] = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_A : int = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : Dict = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : str = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
def a__ ( self ) -> Union[str, Any]:
_A : List[Any] = self.scheduler_classes[0]
_A : Optional[Any] = self.get_scheduler_config()
_A : int = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
_A : Optional[Any] = self.dummy_model()
_A : Dict = self.dummy_sample_deter.to(_a ) * scheduler.init_noise_sigma
_A : str = sample.to(_a )
for t in scheduler.timesteps:
_A : Optional[int] = scheduler.scale_model_input(_a , _a )
_A : List[Any] = model(_a , _a )
_A : Dict = scheduler.step(_a , _a , _a )
_A : List[str] = output.prev_sample
_A : str = torch.sum(torch.abs(_a ) )
_A : List[str] = torch.mean(torch.abs(_a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
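# Every full-loop test above exercises the same canonical scheduler contract;
# a minimal sketch of that loop (``model`` and ``sample`` are stand-ins for
# illustration):
import torch


def denoise(scheduler, model, sample: torch.Tensor) -> torch.Tensor:
    sample = sample * scheduler.init_noise_sigma  # scale to the scheduler's starting sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample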
| 26 | 0 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ : Optional[int] = logging.get_logger(__name__)
def __a ( snake_case_ ) -> List[Any]:
'''simple docstring'''
print('''Loading config file...''' )
    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
__UpperCAmelCase = []
for k, v in d.items():
__UpperCAmelCase = parent_key + sep + k if parent_key else k
if isinstance(snake_case_ , collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(snake_case_ , snake_case_ , sep=snake_case_ ).items() )
else:
items.append((new_key, v) )
return dict(snake_case_ )
__UpperCAmelCase = argparse.Namespace()
with open(snake_case_ , '''r''' ) as yaml_file:
try:
__UpperCAmelCase = yaml.load(snake_case_ , Loader=yaml.FullLoader )
__UpperCAmelCase = flatten_yaml_as_dict(snake_case_ )
for k, v in flat_cfg.items():
setattr(snake_case_ , snake_case_ , snake_case_ )
except yaml.YAMLError as exc:
logger.error('''Error while loading config file: {}. Error message: {}'''.format(snake_case_ , str(snake_case_ ) ) )
return config
def __a ( task_name , orig_cfg_file ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = MobileViTVaConfig()
__UpperCAmelCase = False
# dataset
if task_name.startswith('''imagenet1k_''' ):
__UpperCAmelCase = 1_0_0_0
if int(task_name.strip().split('''_''' )[-1] ) == 3_8_4:
__UpperCAmelCase = 3_8_4
else:
__UpperCAmelCase = 2_5_6
__UpperCAmelCase = """imagenet-1k-id2label.json"""
elif task_name.startswith('''imagenet21k_to_1k_''' ):
__UpperCAmelCase = 2_1_0_0_0
if int(task_name.strip().split('''_''' )[-1] ) == 3_8_4:
__UpperCAmelCase = 3_8_4
else:
__UpperCAmelCase = 2_5_6
__UpperCAmelCase = """imagenet-22k-id2label.json"""
elif task_name.startswith('''ade20k_''' ):
__UpperCAmelCase = 1_5_1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = """ade20k-id2label.json"""
__UpperCAmelCase = True
elif task_name.startswith('''voc_''' ):
__UpperCAmelCase = 2_1
__UpperCAmelCase = 5_1_2
__UpperCAmelCase = """pascal-voc-id2label.json"""
__UpperCAmelCase = True
# orig_config
__UpperCAmelCase = load_orig_config_file(snake_case_ )
assert getattr(snake_case_ , '''model.classification.name''' , -1 ) == "mobilevit_v2", "Invalid model"
__UpperCAmelCase = getattr(snake_case_ , '''model.classification.mitv2.width_multiplier''' , 1.0 )
assert (
getattr(snake_case_ , '''model.classification.mitv2.attn_norm_layer''' , -1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
__UpperCAmelCase = getattr(snake_case_ , '''model.classification.activation.name''' , '''swish''' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
__UpperCAmelCase = getattr(snake_case_ , '''model.segmentation.output_stride''' , 1_6 )
if "_deeplabv3" in task_name:
__UpperCAmelCase = getattr(snake_case_ , '''model.segmentation.deeplabv3.aspp_rates''' , [1_2, 2_4, 3_6] )
__UpperCAmelCase = getattr(snake_case_ , '''model.segmentation.deeplabv3.aspp_out_channels''' , 5_1_2 )
__UpperCAmelCase = getattr(snake_case_ , '''model.segmentation.deeplabv3.aspp_dropout''' , 0.1 )
# id2label
__UpperCAmelCase = """huggingface/label-files"""
__UpperCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) )
    __UpperCAmelCase = {int(k ): v for k, v in idalabel.items()}
__UpperCAmelCase = idalabel
__UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __a ( dct , old , new ) -> Tuple:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def __a ( state_dict , base_model=False ) -> int:
'''simple docstring'''
if base_model:
__UpperCAmelCase = """"""
else:
__UpperCAmelCase = """mobilevitv2."""
__UpperCAmelCase = []
for k in state_dict.keys():
if k[:8] == "encoder.":
__UpperCAmelCase = k[8:]
else:
__UpperCAmelCase = k
if ".block." in k:
__UpperCAmelCase = k_new.replace('''.block.''' , '''.''' )
if ".conv." in k:
__UpperCAmelCase = k_new.replace('''.conv.''' , '''.convolution.''' )
if ".norm." in k:
__UpperCAmelCase = k_new.replace('''.norm.''' , '''.normalization.''' )
if "conv_1." in k:
__UpperCAmelCase = k_new.replace('''conv_1.''' , f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
__UpperCAmelCase = k_new.replace(f'''layer_{i}.''' , f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
__UpperCAmelCase = k_new.replace('''.exp_1x1.''' , '''.expand_1x1.''' )
if ".red_1x1." in k:
__UpperCAmelCase = k_new.replace('''.red_1x1.''' , '''.reduce_1x1.''' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
__UpperCAmelCase = k_new.replace(f'''layer_{i}.0.''' , f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
__UpperCAmelCase = k_new.replace(f'''layer_{i}.1.local_rep.0.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
__UpperCAmelCase = k_new.replace(f'''layer_{i}.1.local_rep.1.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
__UpperCAmelCase = [0, 1]
elif i == 4:
__UpperCAmelCase = [0, 1, 2, 3]
elif i == 5:
__UpperCAmelCase = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
__UpperCAmelCase = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' , f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
__UpperCAmelCase = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' , f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
__UpperCAmelCase = k_new.replace(f'''layer_{i}.1.conv_proj.''' , f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
__UpperCAmelCase = k_new.replace('''pre_norm_attn.0.''' , '''layernorm_before.''' )
if "pre_norm_attn.1." in k:
__UpperCAmelCase = k_new.replace('''pre_norm_attn.1.''' , '''attention.''' )
if "pre_norm_ffn.0." in k:
__UpperCAmelCase = k_new.replace('''pre_norm_ffn.0.''' , '''layernorm_after.''' )
if "pre_norm_ffn.1." in k:
__UpperCAmelCase = k_new.replace('''pre_norm_ffn.1.''' , '''ffn.conv1.''' )
if "pre_norm_ffn.3." in k:
__UpperCAmelCase = k_new.replace('''pre_norm_ffn.3.''' , '''ffn.conv2.''' )
if "classifier.1." in k:
__UpperCAmelCase = k_new.replace('''classifier.1.''' , '''classifier.''' )
if "seg_head." in k:
__UpperCAmelCase = k_new.replace('''seg_head.''' , '''segmentation_head.''' )
if ".aspp_layer." in k:
__UpperCAmelCase = k_new.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in k:
__UpperCAmelCase = k_new.replace('''.aspp_pool.''' , '''.''' )
rename_keys.append((k, k_new) )
return rename_keys
def __a ( state_dict ) -> Optional[int]:
    '''simple docstring'''
    keys_to_ignore = []
for k in state_dict.keys():
if k.startswith('''seg_head.aux_head.''' ):
keys_to_ignore.append(snake_case_ )
for k in keys_to_ignore:
state_dict.pop(snake_case_ , snake_case_ )
def __a ( ) -> Optional[int]:
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __a ( task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = get_mobilevitva_config(snake_case_ , snake_case_ )
# load original state_dict
__UpperCAmelCase = torch.load(snake_case_ , map_location='''cpu''' )
# load huggingface model
if task_name.startswith('''ade20k_''' ) or task_name.startswith('''voc_''' ):
__UpperCAmelCase = MobileViTVaForSemanticSegmentation(snake_case_ ).eval()
__UpperCAmelCase = False
else:
__UpperCAmelCase = MobileViTVaForImageClassification(snake_case_ ).eval()
__UpperCAmelCase = False
    # remove and rename some keys of the loaded original model
__UpperCAmelCase = checkpoint
remove_unused_keys(snake_case_ )
__UpperCAmelCase = create_rename_keys(snake_case_ , base_model=snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
# load modified state_dict
model.load_state_dict(snake_case_ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 )
__UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase = model(**snake_case_ )
# verify classification model
if task_name.startswith('''imagenet''' ):
__UpperCAmelCase = outputs.logits
__UpperCAmelCase = logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
if task_name.startswith('''imagenet1k_256''' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
__UpperCAmelCase = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] , snake_case_ , atol=1e-4 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
A_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
A_ : Optional[Any] = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 333 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( SchedulerMixin,ConfigMixin ):
    order = 1
@register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps( self , num_inference_steps , device = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self , score , x , t , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
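# The step above is one Euler-Maruyama update of the reverse-time VP-SDE:
#
#     dx = [ -1/2 * beta(t) * x - beta(t) * score(x, t) ] dt + sqrt(beta(t)) dw
#
# integrated backwards with dt = -1 / num_steps, so
#
#     x_mean = x + drift * dt
#     x      = x_mean + sqrt(beta(t)) * sqrt(-dt) * z,    z ~ N(0, I)
#
# which matches the drift/diffusion/noise lines in step_pred.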
| 26 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def __a ( checkpoint, config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
UpperCAmelCase_ : str = vae_state_dict["""encoder.conv_in.weight"""]
UpperCAmelCase_ : int = vae_state_dict["""encoder.conv_in.bias"""]
UpperCAmelCase_ : Dict = vae_state_dict["""encoder.conv_out.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""encoder.conv_out.bias"""]
UpperCAmelCase_ : List[Any] = vae_state_dict["""encoder.norm_out.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""encoder.norm_out.bias"""]
UpperCAmelCase_ : Optional[int] = vae_state_dict["""decoder.conv_in.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.conv_in.bias"""]
UpperCAmelCase_ : Union[str, Any] = vae_state_dict["""decoder.conv_out.weight"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.conv_out.bias"""]
UpperCAmelCase_ : Tuple = vae_state_dict["""decoder.norm_out.weight"""]
UpperCAmelCase_ : List[str] = vae_state_dict["""decoder.norm_out.bias"""]
UpperCAmelCase_ : Optional[int] = vae_state_dict["""quant_conv.weight"""]
UpperCAmelCase_ : List[Any] = vae_state_dict["""quant_conv.bias"""]
UpperCAmelCase_ : Optional[Any] = vae_state_dict["""post_quant_conv.weight"""]
UpperCAmelCase_ : int = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ : Any = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ : Optional[Any] = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ : Any = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ : int = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ )
}
for i in range(snake_case_ ):
UpperCAmelCase_ : str = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ : Any = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
UpperCAmelCase_ : List[str] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
UpperCAmelCase_ : Optional[Any] = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : str = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : List[str] = [key for key in vae_state_dict if """encoder.mid.block""" in key]
UpperCAmelCase_ : Union[str, Any] = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase_ : str = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
UpperCAmelCase_ : str = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : str = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : str = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
UpperCAmelCase_ : int = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ : List[str] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
conv_attn_to_linear(snake_case_ )
for i in range(snake_case_ ):
UpperCAmelCase_ : List[str] = num_up_blocks - 1 - i
UpperCAmelCase_ : List[str] = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
UpperCAmelCase_ : Union[str, Any] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
UpperCAmelCase_ : List[str] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
UpperCAmelCase_ : str = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : int = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : Tuple = [key for key in vae_state_dict if """decoder.mid.block""" in key]
UpperCAmelCase_ : Tuple = 2
for i in range(1, num_mid_res_blocks + 1 ):
UpperCAmelCase_ : int = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
UpperCAmelCase_ : Any = renew_vae_resnet_paths(snake_case_ )
UpperCAmelCase_ : List[str] = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
UpperCAmelCase_ : Union[str, Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
UpperCAmelCase_ : Any = renew_vae_attention_paths(snake_case_ )
UpperCAmelCase_ : Optional[int] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(snake_case_, snake_case_, snake_case_, additional_replacements=[meta_path], config=snake_case_ )
conv_attn_to_linear(snake_case_ )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output VAE directory.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
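
# Example invocation (paths are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers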
| 61 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"


class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
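

# Minimal usage sketch (not part of the original file):
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   encoded = tokenizer("Hello world")  # returns input_ids and token_type_ids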
| 26 | 0 |
# English gloss of the Korean lines below: "How to install Transformers" and
# "To install from source instead of the last release, comment the command above
# and uncomment the following one."
INSTALL_CONTENT = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 277 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Equation parameters: https://en.wikipedia.org/wiki/Haversine_formula#Formulation
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
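
# Usage sketch (illustrative coordinates for San Francisco and New York):
#   haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)
# returns the great-circle distance between the two points in metres.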
| 26 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
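
# Sample usage (paths are placeholders):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path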
| 313 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
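
# Minimal usage sketch (not part of the original file; inputs are hypothetical
# tensors and the config/checkpoint choice is illustrative):
#   config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   loss, logits = model(input_ids, attention_mask=attention_mask, labels=labels)[:2]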
| 26 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCamelCase ( self : int ) ->Tuple:
lowerCamelCase__ : str = 0
@slow
def __lowerCamelCase ( self : Union[str, Any] ) ->Tuple:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
lowerCamelCase__ : Tuple = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(_a )
self.assertIsNotNone(_a )
self.assertIsInstance(_a , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_a ) , 0 )
def __lowerCamelCase ( self : List[str] ) ->List[str]:
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def __lowerCamelCase ( self : int ) ->Tuple:
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def __lowerCamelCase ( self : Tuple ) ->str:
lowerCamelCase__ : int = AutoConfig.from_pretrained(_a )
self.assertIsInstance(_a , _a )
# Check that tokenizer_type ≠ model_type
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(_a , config=_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def __lowerCamelCase ( self : List[Any] ) ->Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_a , '''vocab.txt''' ) )
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(_a , tokenizer_type='''bert''' , use_fast=_a )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_a , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_a , '''merges.txt''' ) )
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(_a , tokenizer_type='''gpt2''' , use_fast=_a )
self.assertIsInstance(_a , _a )
@require_tokenizers
def __lowerCamelCase ( self : Any ) ->Union[str, Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_a , '''vocab.txt''' ) )
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(_a , tokenizer_type='''bert''' )
self.assertIsInstance(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_a , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_a , '''merges.txt''' ) )
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(_a , tokenizer_type='''gpt2''' )
self.assertIsInstance(_a , _a )
def __lowerCamelCase ( self : List[Any] ) ->Tuple:
with pytest.raises(_a ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def __lowerCamelCase ( self : int ) ->List[str]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
lowerCamelCase__ : List[str] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
if isinstance(_a , _a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _a )
else:
self.assertEqual(tokenizer.do_lower_case , _a )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def __lowerCamelCase ( self : Any ) ->Union[str, Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_a , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
lowerCamelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def __lowerCamelCase ( self : Any ) ->int:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
lowerCamelCase__ : Dict = TOKENIZER_MAPPING.values()
lowerCamelCase__ : Optional[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_a )
@require_tokenizers
def __lowerCamelCase ( self : Optional[Any] ) ->str:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_a ) , _a )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _a )
@require_tokenizers
def __lowerCamelCase ( self : Union[str, Any] ) ->Union[str, Any]:
lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_a )
lowerCamelCase__ : Optional[Any] = """Hello, world. How are you?"""
lowerCamelCase__ : List[Any] = tokenizer.tokenize(_a )
self.assertEqual('''[UNK]''' , tokens[0] )
lowerCamelCase__ : str = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_a )
lowerCamelCase__ : Tuple = tokenizer.tokenize(_a )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def __lowerCamelCase ( self : Dict ) ->Any:
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_a ) , _a )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def __lowerCamelCase ( self : Optional[Any] ) ->Tuple:
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def __lowerCamelCase ( self : str ) ->Dict:
lowerCamelCase__ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_a , _a )
def __lowerCamelCase ( self : List[Any] ) ->Union[str, Any]:
# Check we can load the tokenizer config of an online model.
lowerCamelCase__ : List[str] = get_tokenizer_config('''bert-base-cased''' )
lowerCamelCase__ : Any = config.pop('''_commit_hash''' , _a )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_a , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
lowerCamelCase__ : Optional[int] = get_tokenizer_config(_a )
self.assertDictEqual(_a , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
lowerCamelCase__ : List[Any] = get_tokenizer_config(_a )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[int]:
try:
AutoConfig.register('''custom''' , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
lowerCamelCase__ : List[Any] = CustomTokenizer.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __lowerCamelCase ( self : str ) ->str:
try:
AutoConfig.register('''custom''' , _a )
# Can register in two steps
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_a , slow_tokenizer_class=_a , fast_tokenizer_class=_a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__ : str = BertTokenizerFast.from_pretrained(_a )
bert_tokenizer.save_pretrained(_a )
lowerCamelCase__ : Optional[Any] = CustomTokenizerFast.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained(_a )
self.assertIsInstance(_a , _a )
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained(_a , use_fast=_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : str ) ->List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )
lowerCamelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_a )
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(_a , trust_remote_code=_a , use_fast=_a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def __lowerCamelCase ( self : List[str] ) ->int:
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
_UpperCAmelCase : List[Any] = False
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
_UpperCAmelCase : Any = NewTokenizer
_UpperCAmelCase : List[str] = False
try:
AutoConfig.register('''custom''' , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoTokenizer.register(_a , fast_tokenizer_class=_a )
# If remote code is not set, the default is to use local
lowerCamelCase__ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCamelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_a , use_fast=_a )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self : str ) ->List[Any]:
lowerCamelCase__ : List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
lowerCamelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_a , use_fast=_a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __lowerCamelCase ( self : Dict ) ->Tuple:
with self.assertRaisesRegex(
_a , '''bert-base is not a local folder and is not a valid model identifier''' ):
lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('''bert-base''' )
def __lowerCamelCase ( self : List[str] ) ->str:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_a , revision='''aaaaaa''' )
def __lowerCamelCase ( self : int ) ->str:
# Make sure we have cached the tokenizer.
lowerCamelCase__ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 142 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
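

# Usage sketch (defaults mirror the base checkpoints):
#   config = XmodConfig()
#   assert config.languages == ["en_XX"] and config.default_language is None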
| 26 | 0 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
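
    # The contour at probability 0.5 drawn above is the learned decision boundary
    # separating class 0 from class 1 on the first two iris features.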
| 337 |
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revenue = float("-inf")
    for i in range(1, n + 1):
        max_revenue = max(
            max_revenue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revenue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
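
# Complexity notes: naive_cut_rod_recursive makes O(2^n) recursive calls, while
# top_down_cut_rod (memoized) and bottom_up_cut_rod both run in O(n^2) time with
# O(n) auxiliary space for the max_rev table.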
| 26 | 0 |
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: Any = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
        if self.root is None:
            return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree():
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
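
# Note: this scrapes a hard-coded CSS class from Yahoo Finance's markup; when the
# site's markup changes, soup.find("div", class_=class_) returns None and the
# chained .find("span") raises AttributeError.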
| 26 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
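
# This file uses the standard Transformers lazy-import pattern: `_import_structure`
# below maps submodule names to their public symbols, and the `_LazyModule` installed
# at the bottom defers the real imports until an attribute is first accessed, so
# importing this package stays cheap when torch/TF/Flax are not installed.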
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 272 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def a__ ( self , _a , _a , _a ) -> int:
_A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def a__ ( self , _a , _a ) -> Dict:
_A : Any = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
_A : Optional[int] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def a__ ( self ) -> List[str]:
_A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_A : Dict = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
_A : Any = 3
_A : Any = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
_A : Optional[int] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
_A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_A : Dict = generator.model.config.eos_token_id
_A : List[str] = """<pad>"""
_A : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def a__ ( self ) -> int:
_A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_A : str = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 26 | 0 |
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, None))


if __name__ == "__main__":
    print(f"{solution() = }")
| 121 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 26 | 0 |
def valid_connection(graph, next_ver, curr_ind, path) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph, path, curr_ind) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph, start_index=0) -> list:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
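# minimal usage sketch (assumes the de-obfuscated names above): a 4-vertex
# cycle given as an adjacency matrix has the Hamiltonian cycle 0 -> 1 -> 2 -> 3 -> 0
if __name__ == "__main__":
    square = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(hamilton_cycle(square))  # [0, 1, 2, 3, 0]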
| 117 |
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
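# quick illustrations of the bit trick (using `is_power_of_two` as named above):
#   is_power_of_two(1)  -> True   (2**0)
#   is_power_of_two(64) -> True   (0b1000000 & 0b0111111 == 0)
#   is_power_of_two(12) -> False  (0b1100 & 0b1011 == 0b1000 != 0)
#   is_power_of_two(0)  -> True   (edge case: 0 & -1 == 0, so 0 is accepted)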
| 26 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 242 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
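# worked example: "encoder.attention.q_lin.weight" becomes
# "encoder.attn.q_proj.weight" via the PATTERNS loop, then the encoder branch
# rewrites ".attn" to ".self_attn", yielding "encoder.self_attn.q_proj.weight".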
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
_snake_case = ["START"]
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
_A : Tuple = torch.load(snake_case_,map_location="""cpu""" )
_A : List[Any] = model["""model"""]
_A : Optional[Any] = BlenderbotConfig.from_json_file(snake_case_ )
_A : List[str] = BlenderbotForConditionalGeneration(snake_case_ )
_A : Tuple = m.model.state_dict().keys()
_A : Any = []
_A : Dict = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
_A : Optional[int] = rename_state_dict_key(snake_case_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
_A : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(snake_case_ )
m.model.load_state_dict(snake_case_,strict=snake_case_ )
m.half()
m.save_pretrained(snake_case_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 0 |
from math import pi, sqrt


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 333 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
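# hypothetical usage sketch (connection and table name are placeholders; in
# the library, `Dataset.to_sql` delegates to the writer above):
#   import sqlite3
#   from datasets import Dataset
#   con = sqlite3.connect(":memory:")
#   Dataset.from_dict({"a": [1, 2, 3]}).to_sql("my_table", con)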
| 26 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class A_ (unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=18 , lowercase_=30 , lowercase_=400 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = size if size is not None else {"""shortest_edge""": 18}
UpperCAmelCase_ : Optional[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : Optional[Any] = min_resolution
UpperCAmelCase_ : Any = max_resolution
UpperCAmelCase_ : Any = do_resize
UpperCAmelCase_ : Any = size
UpperCAmelCase_ : Optional[Any] = do_center_crop
UpperCAmelCase_ : str = crop_size
UpperCAmelCase_ : Union[str, Any] = do_normalize
UpperCAmelCase_ : Any = image_mean
UpperCAmelCase_ : Dict = image_std
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A_ (UpperCamelCase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = LevitImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = LevitImageProcessingTester(self )
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , "image_mean" ) )
self.assertTrue(hasattr(_a , "image_std" ) )
self.assertTrue(hasattr(_a , "do_normalize" ) )
self.assertTrue(hasattr(_a , "do_resize" ) )
self.assertTrue(hasattr(_a , "do_center_crop" ) )
self.assertTrue(hasattr(_a , "size" ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Initialize image_processing
UpperCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
UpperCAmelCase_ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ : Tuple = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
# Initialize image_processing
UpperCAmelCase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase_ : Any = image_processing(_a , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 61 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 26 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 277 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
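# expected behaviour (using `harmonic_series` as named above):
#   harmonic_series("5") -> ["1", "1/2", "1/3", "1/4", "1/5"]
#   harmonic_series("")  -> []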
| 26 | 0 |
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    # hooks are only supported once accelerate provides the pre_forward machinery
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
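# minimal sketch of applying the decorator (hedged: `TinyDecoder` is a made-up
# example class; in practice `_hf_hook` is attached by accelerate's offloading
# utilities, so without a hook the wrapper simply falls through to the method):
class TinyDecoder:
    @apply_forward_hook
    def decode(self, x):
        return x * 2


assert TinyDecoder().decode(3) == 6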
| 313 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_snake_case = logging.get_logger(__name__)
_snake_case = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
_snake_case = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCAmelCase_ ( snake_case_ ):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_A : List[str] = model_type_to_module_name(snake_case_ )
_A : List[Any] = importlib.import_module(f'''.{module_name}''',"""transformers.models""" )
try:
return getattr(snake_case_,snake_case_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_A : List[Any] = importlib.import_module("""transformers""" )
if hasattr(snake_case_,snake_case_ ):
return getattr(snake_case_,snake_case_ )
return None
def lowerCAmelCase_ ( snake_case_,snake_case_ = None,snake_case_ = False,snake_case_ = False,snake_case_ = None,snake_case_ = None,snake_case_ = None,snake_case_ = False,**snake_case_,):
_A : Optional[int] = get_file_from_repo(
snake_case_,snake_case_,cache_dir=snake_case_,force_download=snake_case_,resume_download=snake_case_,proxies=snake_case_,use_auth_token=snake_case_,revision=snake_case_,local_files_only=snake_case_,)
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(snake_case_,encoding="""utf-8""" ) as reader:
return json.load(snake_case_ )
class lowercase :
def __init__( self ) -> List[Any]:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(_a )
def a__ ( cls , _a , **_a ) -> Any:
_A : Tuple = kwargs.pop("""config""" , _a )
_A : Tuple = kwargs.pop("""trust_remote_code""" , _a )
_A : List[Any] = True
_A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
_A : Tuple = config_dict.get("""feature_extractor_type""" , _a )
_A : int = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
_A : int = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.feature_extractor_type``
_A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a )
if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_A : Tuple = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_A : Optional[Any] = feature_extractor_class_from_name(_a )
_A : List[Any] = feature_extractor_auto_map is not None
_A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
_A : Optional[int] = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_A : Dict = get_class_from_dynamic_module(
_a , _a , **_a )
_A : str = kwargs.pop("""code_revision""" , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
_A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def a__ ( _a , _a ) -> Optional[int]:
FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
| 26 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ):
@slow
@require_torch
def __lowerCamelCase ( self : str ) ->Dict:
lowerCamelCase__ : int = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' )
lowerCamelCase__ : Tuple = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowerCamelCase__ : Dict = bertabert.config.encoder.vocab_size
lowerCamelCase__ : List[str] = tokenizer.sep_token_id
lowerCamelCase__ : Dict = tokenizer.cls_token_id
lowerCamelCase__ : List[Any] = 1_2_8
lowerCamelCase__ : Tuple = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' )
lowerCamelCase__ : List[str] = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' )
lowerCamelCase__ : Union[str, Any] = train_dataset.select(range(3_2 ) )
lowerCamelCase__ : str = val_dataset.select(range(1_6 ) )
lowerCamelCase__ : Dict = 4
def _map_to_encoder_decoder_inputs(A : str ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCamelCase__ : List[Any] = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=_a , max_length=5_1_2 )
lowerCamelCase__ : int = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=_a , max_length=1_2_8 )
lowerCamelCase__ : Union[str, Any] = inputs.input_ids
lowerCamelCase__ : List[Any] = inputs.attention_mask
lowerCamelCase__ : Dict = outputs.input_ids
lowerCamelCase__ : str = outputs.input_ids.copy()
lowerCamelCase__ : Union[str, Any] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
lowerCamelCase__ : Dict = outputs.attention_mask
assert all(len(_a ) == 5_1_2 for x in inputs.input_ids )
assert all(len(_a ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(A : Optional[Any] ):
lowerCamelCase__ : int = pred.label_ids
lowerCamelCase__ : str = pred.predictions
# all unnecessary tokens are removed
lowerCamelCase__ : Dict = tokenizer.batch_decode(_a , skip_special_tokens=_a )
lowerCamelCase__ : Optional[Any] = tokenizer.batch_decode(_a , skip_special_tokens=_a )
lowerCamelCase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_a ) )] ) / len(_a )
return {"accuracy": accuracy}
# map train dataset
lowerCamelCase__ : Dict = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['''article''', '''highlights'''] , )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
lowerCamelCase__ : Optional[int] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_a , batch_size=_a , remove_columns=['''article''', '''highlights'''] , )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
lowerCamelCase__ : Optional[int] = self.get_auto_remove_tmp_dir()
lowerCamelCase__ : Any = SeqaSeqTrainingArguments(
output_dir=_a , per_device_train_batch_size=_a , per_device_eval_batch_size=_a , predict_with_generate=_a , evaluation_strategy='''steps''' , do_train=_a , do_eval=_a , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCamelCase__ : Optional[Any] = SeqaSeqTrainer(
model=_a , args=_a , compute_metrics=_compute_metrics , train_dataset=_a , eval_dataset=_a , tokenizer=_a , )
# start training
trainer.train()
| 142 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class lowercase ( unittest.TestCase ):
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ) -> Dict:
_A : str = parent
_A : int = batch_size
_A : Optional[int] = num_channels
_A : List[Any] = image_size
_A : int = min_resolution
_A : Optional[int] = max_resolution
_A : Any = do_resize
_A : List[str] = size if size is not None else {"""height""": 18, """width""": 20}
_A : Optional[int] = do_thumbnail
_A : str = do_align_axis
_A : List[Any] = do_pad
_A : Optional[Any] = do_normalize
_A : Tuple = image_mean
_A : List[str] = image_std
def a__ ( self ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase ( UpperCamelCase__,unittest.TestCase ):
_a = DonutImageProcessor if is_vision_available() else None
def a__ ( self ) -> Optional[int]:
_A : List[str] = DonutImageProcessingTester(self )
@property
def a__ ( self ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[Any]:
_A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_thumbnail""" ) )
self.assertTrue(hasattr(_a , """do_align_long_axis""" ) )
self.assertTrue(hasattr(_a , """do_pad""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
def a__ ( self ) -> List[Any]:
_A : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 20} )
_A : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
# Previous config had dimensions in (width, height) order
_A : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {"""height""": 84, """width""": 42} )
def a__ ( self ) -> Union[str, Any]:
pass
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : Any = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Dict:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : List[str] = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
@is_flaky()
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_A : str = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
| 26 | 0 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 337 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 26 | 0 |
def z_function(input_str: str) -> list:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i, z_result, s):
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
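# worked example (using the de-obfuscated names above):
#   z_function("abacaba")          -> [0, 0, 1, 0, 3, 0, 1]
#   find_pattern("aba", "abacaba") -> 2   (occurrences start at indices 0 and 4)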
if __name__ == "__main__":
import doctest
doctest.testmod() | 76 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_,snake_case_ = 8,snake_case_ = 1024,snake_case_="val",snake_case_=None,snake_case_=False,snake_case_="summarization",snake_case_=None,snake_case_=1,snake_case_ = None,snake_case_="",**snake_case_,):
_A : Dict = str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ )
_A : Tuple = Path(snake_case_ )
_A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(snake_case_ )
_A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
_A : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params
_A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_A : int = num_return_sequences
_A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
_A : Optional[int] = tokenizer.model_max_length
if prefix is None:
_A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """"""
_A : Optional[int] = SeqaSeqDataset(
snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ )
_A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn )
_A : Optional[Any] = []
for batch in tqdm(snake_case_ ):
_A : Tuple = model.generate(
input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,)
_A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ )
_A : Dict = batch["""ids"""]
if num_return_sequences > 1:
_A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(snake_case_,snake_case_ )
return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",)
parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" )
parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ )
parser.add_argument(
"""--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" )
parser.add_argument(
"""--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",)
parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument(
"""--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""",action="""store_true""" )
parser.add_argument("""--debug""",action="""store_true""" )
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
if generate_kwargs and args.local_rank <= 0:
print(f'''parsed the following generate kwargs: {generate_kwargs}''' )
_A : Dict = Path(args.save_dir + """_tmp""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking.
_A : int = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_A : Any = {}
if args.src_lang is not None:
_A : int = args.src_lang
if args.tgt_lang is not None:
_A : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case_ )
    results, num_replicas = eval_data_dir(
args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,)
if args.local_rank <= 0:
_A : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case_ )
_A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout )
_A : Optional[int] = combine_partial_results(snake_case_ )
if args.num_return_sequences > 1:
_A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(snake_case_,snake_case_ )
return
_A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(snake_case_ ) as f:
_A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )]
# Calculate metrics, save metrics, and save _generations.txt
_A : Dict = """translation""" in args.task
_A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
_A : Tuple = """bleu""" if calc_bleu else """rouge"""
_A : Dict = score_fn(snake_case_,snake_case_ )
_A : List[Any] = len(snake_case_ )
_A : Optional[int] = time.time() - start_time
_A : Dict = round(runtime / metrics["""n_obs"""],4 )
_A : Dict = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' )
save_json(snake_case_,snake_case_,indent=snake_case_ )
print(snake_case_ )
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) )
else:
shutil.rmtree(snake_case_ )
def combine_partial_results(partial_results) -> list:
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 26 | 0 |
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
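# note: the floating-point cube root above can misfire (on IEEE-754 doubles,
# 27 ** (1 / 3) is not exactly 3.0, so perfect_cube(27) prints False).
# A sketch of a more robust check via integer arithmetic:
def perfect_cube_robust(n: int) -> bool:
    root = round(abs(n) ** (1 / 3))  # round the approximate root...
    return root**3 == abs(n)  # ...then verify exactly with integers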
| 272 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 26 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """`FeatureType` for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """`FeatureType` for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
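# A minimal usage sketch (assuming this module lives at datasets/features/translation.py):
# feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
# feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"})
# -> {"language": ("de", "en", "fr", "fr"),
#     "translation": ("die katze", "the cat", "la chatte", "le chat")}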
| 121 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"


@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for ShapEImg2ImgPipeline: the rendered views of the 3D asset."""

    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for generating the latent representation of a 3D asset from an image and rendering it with NeRF.
    """
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        # the device the pipeline's modules run on, accounting for accelerate hooks
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, List) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt=1,
        num_inference_steps=25,
        generator=None,
        latents=None,
        guidance_scale=4.0,
        frame_size=64,
        output_type="pil",  # pil, np, latent
        return_dict=True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )

            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 26 | 0 |
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with gravity/bead sort."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
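# Bead sort only works for non-negative integers and runs in O(n^2) on this
# list representation; a quick sanity check against the built-in sort
# (illustrative only):
# import random
# data = [random.randrange(100) for _ in range(50)]
# assert bead_sort(data.copy()) == sorted(data)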
| 117 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the loaded original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
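# Example invocation of this conversion script (the script name and file
# paths below are placeholders, not part of the original file):
# python convert_mlcvnets_to_pytorch.py \
#     --task imagenet1k_256 \
#     --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#     --orig_config_path ./mobilevitv2-1.0.yaml \
#     --pytorch_dump_folder_path ./mobilevitv2-1.0-hf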
| 26 | 0 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance preserving stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
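# Minimal sampling sketch for this VP-SDE scheduler; `score_model` is a
# user-provided score network and the tensor shape is illustrative:
# scheduler = ScoreSdeVpScheduler()
# scheduler.set_timesteps(1000)
# x = torch.randn(4, 3, 32, 32)
# for t in scheduler.timesteps:
#     score = score_model(x, t)
#     x, x_mean = scheduler.step_pred(score, x, t)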
| 242 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 26 | 0 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x_points = [i[0] for i in self.list_of_points]
        y_points = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x_points, y_points, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
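# Evaluating a single point without plotting (verified by hand for degree 1):
# BezierCurve([(1, 1), (3, 5)]).bezier_curve_function(0.5)  # -> (2.0, 3.0)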
| 333 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance preserving stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 26 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )

        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
| 61 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" FNet tokenizer, backed by HuggingFace's tokenizers library. Based on Unigram.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
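# Usage sketch for the special-token helpers above (token ids are illustrative):
# tok = FNetTokenizerFast.from_pretrained("google/fnet-base")
# ids_a = tok.encode("hello", add_special_tokens=False)
# ids_b = tok.encode("world", add_special_tokens=False)
# tok.build_inputs_with_special_tokens(ids_a, ids_b)       # [CLS] A [SEP] B [SEP]
# tok.create_token_type_ids_from_sequences(ids_a, ids_b)   # 0s for segment A, 1s for segment B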
| 26 | 0 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for text-to-text generation using seq2seq models."""

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """Summarize news articles and other documents."""

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """Translates from one language to another."""

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
| 277 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Return the great-circle distance between two points on Earth, in metres."""
    # Equation parameters
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
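# Example with approximate city coordinates (values are illustrative):
# SAN_FRANCISCO = (37.774856, -122.424227)
# YOSEMITE = (37.864742, -119.537521)
# haversine_distance(*SAN_FRANCISCO, *YOSEMITE)  # roughly 254 km, returned in metres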
| 26 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DPT model."""

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary.
        """
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
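# Minimal usage sketch:
# from transformers import DPTConfig, DPTModel
# config = DPTConfig()        # dpt-large style defaults
# model = DPTModel(config)    # randomly initialized weights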
| 313 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
def __init__( self , _a ) -> Optional[int]:
super().__init__(_a )
_A : Union[str, Any] = RobertaEmbeddings(_a )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",UpperCamelCase__,)
class lowercase ( UpperCamelCase__ ):
_a = RobertaConfig
_a = "roberta"
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
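# The early-exit criterion the model relies on at inference time is the
# entropy of each highway classifier's logits: low entropy means the exit is
# confident enough to stop. A minimal standalone version of that criterion
# (the library's own `entropy` helper may be normalized differently):
import torch

def logits_entropy(logits: torch.Tensor) -> torch.Tensor:
    p = torch.softmax(logits, dim=-1)
    return -(p * torch.log(p)).sum(dim=-1)

confident = torch.tensor([[10.0, -10.0]])   # near-zero entropy -> exit early
uncertain = torch.tensor([[0.0, 0.0]])      # maximal entropy -> keep going
assert logits_entropy(confident) < logits_entropy(uncertain)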
| 26 | 0 |
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]
    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)
    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
return denorm_actions
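# The per-step guidance inside run_diffusion above is plain gradient ascent on
# the learned value function, scaled by the scheduler's posterior std. A
# minimal standalone sketch of that update on a toy quadratic "value" (no
# diffusion model involved; purely illustrative):
import torch

def toy_guidance_step(x: torch.Tensor, scale: float = 0.1) -> torch.Tensor:
    x = x.clone().requires_grad_(True)
    value = -(x ** 2).sum()                 # toy value function, peaked at 0
    (grad,) = torch.autograd.grad(value, [x])
    return (x + scale * grad).detach()      # ascend the value landscape

x = torch.randn(4)
for _ in range(100):
    x = toy_guidance_step(x)
assert x.abs().max() < 1e-2  # repeated ascent drives x toward the value peak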
| 142 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , pre_norm=False , adapter_reduction_factor=2 , adapter_layer_norm=False , adapter_reuse_layer_norm=True , ln_before_adapter=True , languages=("en_XX",) , default_language=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
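# Hedged usage sketch (XmodConfig per the upstream library; names shown for
# illustration only): one adapter module is kept per entry in `languages`, and
# `default_language` selects the adapter used when no language id is passed.
#
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   config.adapter_reduction_factor  # -> 2, bottleneck ratio of each adapter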
| 26 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__( self , image_size=224 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
    @property
    def atol_for_validation(self) -> float:
return 1E-4
| 337 |
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
if n == 0:
return 0
_A : Tuple = float("""-inf""" )
for i in range(1,n + 1 ):
_A : str = max(
snake_case_,prices[i - 1] + naive_cut_rod_recursive(n - i,snake_case_ ) )
return max_revue
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
_A : Dict = [float("""-inf""" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(snake_case_,snake_case_,snake_case_ )
def lowerCAmelCase_ ( snake_case_,snake_case_,snake_case_ ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_A : List[str] = float("""-inf""" )
for i in range(1,n + 1 ):
_A : Optional[Any] = max(
snake_case_,prices[i - 1] + _top_down_cut_rod_recursive(n - i,snake_case_,snake_case_ ),)
_A : Tuple = max_revenue
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
_enforce_args(snake_case_,snake_case_ )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_A : List[Any] = [float("""-inf""" ) for _ in range(n + 1 )]
_A : Any = 0
for i in range(1,n + 1 ):
_A : Optional[Any] = max_rev[i]
for j in range(1,i + 1 ):
_A : int = max(snake_case_,prices[j - 1] + max_rev[i - j] )
_A : int = max_revenue_i
return max_rev[n]
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if n < 0:
_A : Optional[Any] = f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(snake_case_ )
if n > len(snake_case_ ):
_A : Any = (
"""Each integral piece of rod must have a corresponding price. """
f'''Got n = {n} but length of prices = {len(snake_case_ )}'''
)
raise ValueError(snake_case_ )
def lowerCAmelCase_ ( ):
_A : Tuple = [6, 10, 12, 15, 20, 23]
_A : List[Any] = len(snake_case_ )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_A : Any = 36
_A : List[Any] = top_down_cut_rod(snake_case_,snake_case_ )
_A : List[Any] = bottom_up_cut_rod(snake_case_,snake_case_ )
_A : Dict = naive_cut_rod_recursive(snake_case_,snake_case_ )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 26 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = 'Hello world! cécé herlolip'

BertAbsConfig = namedtuple(
'BertAbsConfig',
[
'temp_dir',
'large',
'use_bert_emb',
'finetune_bert',
'encoder',
'share_emb',
'max_pos',
'enc_layers',
'enc_hidden_size',
'enc_heads',
'enc_ff_size',
'enc_dropout',
'dec_layers',
'dec_hidden_size',
'dec_heads',
'dec_ff_size',
'dec_dropout',
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
SCREAMING_SNAKE_CASE : Optional[Any] = BertAbsConfig(
temp_dir="." , finetune_bert=snake_case_ , large=snake_case_ , share_emb=snake_case_ , use_bert_emb=snake_case_ , encoder="bert" , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(snake_case_ , lambda _a , _a: storage)
SCREAMING_SNAKE_CASE : Optional[int] = AbsSummarizer(snake_case_ , torch.device("cpu") , snake_case_)
original.eval()
SCREAMING_SNAKE_CASE : List[str] = BertAbsSummarizer(snake_case_ , torch.device("cpu"))
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model")
new_model.bert.load_state_dict(original.bert.state_dict())
new_model.decoder.load_state_dict(original.decoder.state_dict())
new_model.generator.load_state_dict(original.generator.state_dict())
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical")
SCREAMING_SNAKE_CASE : List[str] = BertTokenizer.from_pretrained("bert-base-uncased")
# prepare the model inputs
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("This is sample éàalj'-.")
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_)))
SCREAMING_SNAKE_CASE : int = torch.tensor(snake_case_).unsqueeze(0)
SCREAMING_SNAKE_CASE : Any = tokenizer.encode("This is sample 3 éàalj'-.")
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(snake_case_)))
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(snake_case_).unsqueeze(0)
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0
# forward pass
SCREAMING_SNAKE_CASE : List[str] = encoder_input_ids
SCREAMING_SNAKE_CASE : int = decoder_input_ids
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : List[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : List[str] = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE : List[str] = original(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_)[0]
SCREAMING_SNAKE_CASE : List[str] = original.generator(snake_case_)
SCREAMING_SNAKE_CASE : str = new_model(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_)[0]
SCREAMING_SNAKE_CASE : Optional[Any] = new_model.generator(snake_case_)
SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case_))
SCREAMING_SNAKE_CASE : str = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
print("Maximum absolute difference beween weights: {:.2f}".format(snake_case_))
SCREAMING_SNAKE_CASE : Optional[Any] = torch.allclose(snake_case_ , snake_case_ , atol=1E-3)
if are_identical:
logging.info("all weights are equal up to 1e-3")
else:
raise ValueError("the weights are different. The new model is likely different from the original one.")
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary")
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--bertabs_checkpoint_path',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model.',
)
args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
) | 76 |
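# The conversion check above boils down to one pattern: run both models on
# identical inputs and bound the maximum absolute deviation. A minimal,
# self-contained sketch of that check (tensors illustrative):
import torch

def outputs_match(a: torch.Tensor, b: torch.Tensor, atol: float = 1e-3) -> bool:
    # same criterion as the script: max |a - b| within tolerance
    return torch.max(torch.abs(a - b)).item() <= atol

assert outputs_match(torch.ones(3), torch.ones(3) + 1e-4)
assert not outputs_match(torch.ones(3), torch.ones(3) + 1.0)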
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 26 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
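# Typical invocations of the subcommand wired up above, via the accelerate
# CLI (shown for illustration):
#
#   accelerate test
#   accelerate test --config_file path/to/config.yaml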
| 272 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase ( unittest.TestCase ):
_a = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_a = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def a__ ( self , _a , _a , _a ) -> int:
_A : str = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def a__ ( self , _a , _a ) -> Dict:
_A : Any = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
_A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
_A : Optional[int] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def a__ ( self ) -> List[str]:
_A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_A : Dict = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
_A : Any = 3
_A : Any = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
_A : Optional[int] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
_A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_A : Dict = generator.model.config.eos_token_id
_A : List[str] = """<pad>"""
_A : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def a__ ( self ) -> int:
_A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_A : str = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 26 | 0 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
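# Sanity check on the classic amicable pair (220, 284): each number's proper
# divisors sum to the other, which is exactly the property solution() counts.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220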
| 121 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3,5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5,3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1,3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3,6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3,5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5,3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3,6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6,3 )}''' )
if __name__ == "__main__":
main()
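# Cross-check both variants against the standard library (values illustrative):
import math

assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == math.gcd(48, 18) == 6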
| 26 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
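# Example invocation of the script above (script filename and paths are
# illustrative):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth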
| 117 |
def lowerCAmelCase_ ( snake_case_ ):
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 | 0 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
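# The iter([1, None]) above makes the mocked file yield one chunk and then a
# falsy sentinel, which is what terminates send_file's read loop. The same
# mocking trick, distilled (names illustrative):
from unittest.mock import Mock

chunks = iter([b"data", None])
mock_read = Mock(side_effect=lambda _=None: next(chunks))
assert mock_read(1024) == b"data"
assert mock_read(1024) is None  # loop terminator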
| 242 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
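# Quick sanity checks of the key-renaming rules above on ParlAI-style keys:
assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert rename_state_dict_key("encoder.attention.q_lin.weight") == "encoder.self_attn.q_proj.weight"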
| 26 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''')

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]], dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32,
        )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 333 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''')
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
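# The batching scheme above, distilled: offsets step through the table in
# batch_size strides, and every batch after the first is forced to
# if_exists="append" so it extends rather than recreates the table
# (batch_size and table length illustrative):
batch_size = 3
offsets = list(range(0, 8, batch_size))
assert offsets == [0, 3, 6]
assert [offset > 0 for offset in offsets] == [False, True, True]  # which batches get "append"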
| 26 | 0 |
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
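# Worked example (rate and cash flows illustrative): an outflow of 1000
# followed by two inflows of 600, discounted at 10%:
# -1000 + 600/1.1 + 600/1.21 = 41.32 (rounded to 2 digits).
assert present_value(0.10, [-1000, 600, 600]) == 41.32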
| 61 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__( self , vocab_size=3_2000 , hidden_size=768 , num_hidden_layers=12 , intermediate_size=3072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=4 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_tpu_fourier_optimizations=False , tpu_short_seq_length=512 , pad_token_id=3 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
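# Hedged usage sketch (FNetConfig per the upstream library; shown for
# illustration only):
#
#   config = FNetConfig(vocab_size=32000, use_tpu_fourier_optimizations=True)
#   config.tpu_short_seq_length  # -> 512, used to pick the DFT-matrix path on TPU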
| 26 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
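# A minimal, self-contained sketch of the lazy-import pattern used above
# (module and attribute names are illustrative):
import importlib

class LazyModule:
    def __init__(self, name, import_structure):
        self._name = name
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the submodule only when one of its attributes is first used
        for module, attrs in self._import_structure.items():
            if attr in attrs:
                return getattr(importlib.import_module(module), attr)
        raise AttributeError(attr)

math_lazy = LazyModule("math", {"math": ["sqrt", "pi"]})
assert math_lazy.sqrt(9) == 3.0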
| 277 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f'''1/{temp + 1}''' if series else "1")
    return series
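# Quick check of the string form of the first terms:
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]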
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 26 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : str = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE : str = BlipImageProcessor()
SCREAMING_SNAKE_CASE : List[Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = BlipaProcessor(_a , _a )
processor.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->List[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def __lowerCAmelCase ( self ) ->Any:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE : List[str] = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : List[Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
SCREAMING_SNAKE_CASE : List[Any] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __lowerCAmelCase ( self ) ->str:
SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : List[Any] = BlipaProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(_a , return_tensors='''np''' )
SCREAMING_SNAKE_CASE : Optional[int] = processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self ) ->List[Any]:
SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = BlipaProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Optional[Any] = """lower newer"""
SCREAMING_SNAKE_CASE : Tuple = processor(text=_a )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : str = self.get_image_processor()
SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Dict = BlipaProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : int = """lower newer"""
SCREAMING_SNAKE_CASE : List[str] = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Any = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowerCAmelCase ( self ) ->Optional[Any]:
SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizer()
SCREAMING_SNAKE_CASE : str = BlipaProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE : Tuple = processor.batch_decode(_a )
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __lowerCAmelCase ( self ) ->Any:
SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
SCREAMING_SNAKE_CASE : Optional[Any] = BlipaProcessor(tokenizer=_a , image_processor=_a )
SCREAMING_SNAKE_CASE : Tuple = """lower newer"""
SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE : Any = processor(text=_a , images=_a )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 313 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
_A : List[str] = model_type_to_module_name(snake_case_ )
_A : List[Any] = importlib.import_module(f'''.{module_name}''',"""transformers.models""" )
try:
return getattr(snake_case_,snake_case_ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(snake_case_,"""__name__""",snake_case_ ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_A : List[Any] = importlib.import_module("""transformers""" )
if hasattr(snake_case_,snake_case_ ):
return getattr(snake_case_,snake_case_ )
return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,)
if resolved_config_file is None:
logger.info(
"""Could not locate the feature extractor configuration file, will try to use the model config instead.""" )
return {}
with open(snake_case_,encoding="""utf-8""" ) as reader:
return json.load(snake_case_ )
class AutoFeatureExtractor:
def __init__( self ) -> List[Any]:
raise EnvironmentError(
"""AutoFeatureExtractor is designed to be instantiated """
"""using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
def a__ ( cls , _a , **_a ) -> Any:
_A : Tuple = kwargs.pop("""config""" , _a )
_A : Tuple = kwargs.pop("""trust_remote_code""" , _a )
_A : List[Any] = True
_A , _A : Tuple = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
_A : Tuple = config_dict.get("""feature_extractor_type""" , _a )
_A : int = None
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
_A : Optional[int] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
_A : int = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.feature_extractor_type``
_A : Optional[int] = getattr(_a , """feature_extractor_type""" , _a )
if hasattr(_a , """auto_map""" ) and "AutoFeatureExtractor" in config.auto_map:
_A : Tuple = config.auto_map["""AutoFeatureExtractor"""]
if feature_extractor_class is not None:
_A : Optional[Any] = feature_extractor_class_from_name(_a )
_A : List[Any] = feature_extractor_auto_map is not None
_A : Union[str, Any] = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
_A : Optional[int] = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
_A : Dict = get_class_from_dynamic_module(
_a , _a , **_a )
_A : str = kwargs.pop("""code_revision""" , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
_A : Dict = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
F'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
F'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
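# Resolution order implemented above, in one place: (1) feature_extractor_type
# from the preprocessor config, (2) the model config's feature_extractor_type
# or auto_map entry, (3) trusted remote code, (4) the static model-type
# mapping. Hedged usage sketch (checkpoint name illustrative):
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")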
| 26 | 0 |
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    """simple docstring"""
    if days_between_payments <= 0:
        raise ValueError('''days_between_payments must be > 0''')
    if daily_interest_rate < 0:
        raise ValueError('''daily_interest_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * daily_interest_rate * days_between_payments


def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
    """simple docstring"""
    if number_of_compounding_periods <= 0:
        raise ValueError('''number_of_compounding_periods must be > 0''')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
    """simple docstring"""
    if number_of_years <= 0:
        raise ValueError('''number_of_years must be > 0''')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('''nominal_annual_percentage_rate must be >= 0''')
    if principal <= 0:
        raise ValueError('''principal must be > 0''')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
if __name__ == "__main__":
import doctest
doctest.testmod()
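# Spot checks (principal and rates illustrative):
from math import isclose

assert isclose(simple_interest(1000, 0.05, 3), 150.0)        # 1000 * 0.05 * 3
assert isclose(compound_interest(10000, 0.05, 3), 1576.25)   # 10000 * (1.05**3 - 1)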
| 142 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_thumbnail=True , do_align_axis=False , do_pad=True , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"""height""": 18, """width""": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_thumbnail"""))
        self.assertTrue(hasattr(image_processing, """do_align_long_axis"""))
        self.assertTrue(hasattr(image_processing, """do_pad"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))
        self.assertTrue(hasattr(image_processing, """image_mean"""))
        self.assertTrue(hasattr(image_processing, """image_std"""))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
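# A minimal way to exercise this example (the flag below is defined by this script's
# own argument parser; the file name is whatever you saved this script as):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2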
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset.

    Args:
        accelerator (`Accelerator`): an `Accelerator` object for handling distributed setup.
        batch_size (`int`, *optional*): the batch size for the train dataloader.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    """Apply the ReLU activation element-wise: max(0, x)."""
    return np.maximum(0, vector)
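

def relu_derivative(vector: np.ndarray) -> np.ndarray:
    # Companion sketch (not in the original file): the subgradient of ReLU used in
    # backprop, 1 where the input is positive and 0 elsewhere (the value at exactly
    # 0 is a convention).
    return (np.asarray(vector) > 0).astype(np.float64)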
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    # Convert a byte count to whole MiB (truncating).
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that tracks CUDA memory allocated inside the `with` block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False,
    )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound", type=float, default=None, help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train", type=int, default=320, help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val", type=int, default=160, help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one file, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
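
# A typical multi-GPU invocation (a sketch: the file name is an assumption, i.e.
# whatever this script is saved as; torch.distributed.launch supplies --local_rank,
# the remaining flags are the ones defined by the parser above):
#   python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 --data_dir cnn_dm --save_dir tmp_gen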
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" FNet tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it and is
        # included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
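

# Minimal usage sketch (illustrative; the checkpoint id comes from the maps above):
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   tokenizer("Hello world")["input_ids"]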
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
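

# Illustrative construction (the values shown are simply the defaults documented in
# __init__ above; other EfficientNet variants scale these coefficients):
#   config = EfficientNetConfig(width_coefficient=2.0, depth_coefficient=3.1, image_size=600)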
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from PIL import Image\n    >>> import torch\n    >>> from diffusers import DiffusionPipeline\n    >>> from diffusers.utils import export_to_gif, load_image\n\n    >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    >>> repo = \"openai/shap-e-img2img\"\n    >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n    >>> pipe = pipe.to(device)\n\n    >>> guidance_scale = 3.0\n    >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n    >>> image = load_image(image_url).convert(\"RGB\")\n\n    >>> images = pipe(\n    ...     image,\n    ...     guidance_scale=guidance_scale,\n    ...     num_inference_steps=64,\n    ...     frame_size=256,\n    ... ).images\n\n    >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n    ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E image-to-image pipeline."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior: PriorTransformer, image_encoder: CLIPVisionModel, image_processor: CLIPImageProcessor, scheduler: HeunDiscreteScheduler, renderer: ShapERenderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
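

# Minimal usage sketch (illustrative; checkpoint ids come from the maps above):
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tokenizer("Hello world", return_tensors="pt")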
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove unused keys (e.g. the auxiliary segmentation head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Copy/paste/tweak the original model's weights to our MobileViTV2 structure."""
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
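
# Usage sketch (added for illustration; the script filename and paths below are
# hypothetical, the task names come from the `choices` list above):
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf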
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
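
# The `_LazyModule` registered above defers the heavy torch/TF imports until a
# name is first accessed. A minimal sketch of the same idea (illustrative only,
# not the actual transformers implementation):
#
#   import importlib
#   import types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map "ClassName" -> submodule that defines it
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, name):
#           module = importlib.import_module("." + self._class_to_module[name], self.__name__)
#           return getattr(module, name)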
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
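
# Minimal sampling sketch (illustrative; `score_model` is a hypothetical score
# network, not part of this module):
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000)
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)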
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = 100, ):
UpperCAmelCase_ : List[str] = x_start
UpperCAmelCase_ : List[str] = fnc(snake_case_ )
UpperCAmelCase_ : Tuple = 0.0
for _ in range(snake_case_ ):
# Approximates small segments of curve as linear and solve
# for trapezoidal area
UpperCAmelCase_ : List[Any] = (x_end - x_start) / steps + xa
UpperCAmelCase_ : Any = fnc(snake_case_ )
area += abs(fxa + fxa ) * (xa - xa) / 2
# Increment step
UpperCAmelCase_ : Optional[Any] = xa
UpperCAmelCase_ : Any = fxa
return area
if __name__ == "__main__":
def __a ( __lowerCamelCase ):
return x**3 + x**2
print('f(x) = x^3 + x^2')
print('The area between the curve, x = -5, x = 5 and the x axis is:')
_a = 10
while i <= 100_000:
print(f"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""")
i *= 10
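
if __name__ == "__main__":
    # Worked check (a sketch added for illustration): because of the abs() above,
    # trapezoidal_area computes the *unsigned* area between the curve and the x
    # axis. For f(x) = x^3 + x^2 = x^2 * (x + 1), f is negative on [-5, -1) and
    # positive on (-1, 5], so with F(x) = x**4 / 4 + x**3 / 3 the exact area is
    #     (F(-5) - F(-1)) + (F(5) - F(-1)) = 1376/12 + 198 = 938/3 ≈ 312.67,
    # and the approximation converges to it as the step count grows:
    assert abs(trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 100_000) - 938 / 3) < 0.01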
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
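
# Illustrative sketch of the special-token layout produced by the two methods
# above (token ids are made up; the real cls/sep ids come from the vocabulary):
#
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
#   # -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]
#   tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
#   # -> [0, 0, 0, 0, 1, 1, 1]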
from manim import *
class CheckpointLoadingScene(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        cpu_targs = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(cpu_targs[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            cpu_targs.append(cpu_target)

        checkpoint_base = [mem.copy() for i in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4)
        checkpoint.move_to([3, 0.5, 0])

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())

        step_text = MarkupText(
            f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.',
            font_size=24,
        )
        step_text.move_to([2, 2, 0])
        self.play(Write(step_text), Write(blue_text))
        self.play(Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1))

        first_animations = []
        second_animations = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            first_animations.append(GrowFromCenter(target, run_time=1))

            cpu_target = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5])
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Distance in metres between two points on Earth, using the haversine formula
    on latitudes corrected for the Earth's flattening.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
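
if __name__ == "__main__":
    # Example added for illustration (the reference figure is approximate):
    # San Francisco -> New York City is roughly 4,150 km along the surface.
    sf_to_nyc = haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)
    print(f"San Francisco -> New York: {sf_to_nyc / 1000:.0f} km (expected ≈ 4150 km)")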
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int):
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func):
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
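
if __name__ == "__main__":
    # Illustrative cost comparison (added; timings are machine-dependent):
    # naive recursion explores every cut pattern, growing exponentially in n,
    # while the memoized top-down and bottom-up variants are O(n^2).
    import timeit

    prices = list(range(1, 25))
    n = len(prices)
    print("naive    :", timeit.timeit(lambda: naive_cut_rod_recursive(n, prices), number=1), "s")
    print("bottom-up:", timeit.timeit(lambda: bottom_up_cut_rod(n, prices), number=1), "s")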
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
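
# Example invocation (a sketch; valid tokenizer names are the keys of
# SLOW_TO_FAST_CONVERTERS, e.g. "BertTokenizer", and the dump path is hypothetical):
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers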
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
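
# Wiring sketch with Ray actors (illustrative; see the accompanying training
# script in examples/research_projects/rag for the real setup):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()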
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase ( UpperCamelCase__ ):
'''simple docstring'''
def __get__( self : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str]=None ):
"""simple docstring"""
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
_A: List[str] = """__cached_""" + self.fget.__name__
_A: Any = getattr(_a , _a , _a )
if cached is None:
_A: Dict = self.fget(_a )
setattr(_a , _a , _a )
return cached
def lowerCamelCase__ ( a ) -> List[str]:
_A: Union[str, Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f"""invalid truth value {val!r}""" )
def is_tensor(x):
    """Test whether `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call without tensorflow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
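# Illustrative usage sketch (not part of the original module; the dataclass
# and field names below are hypothetical): a dataclass subclassing
# ModelOutput can be read like a tuple, a dict, or a plain object.
#
#   @dataclass
#   class ExampleOutput(ModelOutput):
#       loss: Any = None
#       logits: Any = None
#
#   out = ExampleOutput(logits=[1.0, 2.0])
#   out["logits"] == out.logits == out[0]   # `loss` is None, so it is skipped
#   out.to_tuple() == ([1.0, 2.0],)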
class ExplicitEnum(str, Enum):
    """Enum with more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """Possible values for the `padding` argument. Useful for tab-completion in an IDE."""

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """Possible values for the `return_tensors` argument. Useful for tab-completion in an IDE."""

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check if a given model can return loss, i.e. has a `return_loss` parameter defaulting to `True`."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
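# Minimal sketch of what `flatten_dict` produces (illustrative only):
#
#   flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}
#   flatten_dict({"a": {"b": 1}}, delimiter="/") == {"a/b": 1}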
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` for torch/TensorFlow/Jax tensors and NumPy arrays."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map
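# Illustrative input/output for `add_model_info_to_auto_map` (hedged example;
# the mapping value is hypothetical):
#
#   add_model_info_to_auto_map({"AutoModel": "modeling.MyModel"}, "user/repo")
#   -> {"AutoModel": "user/repo--modeling.MyModel"}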
def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
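# Illustrative behaviour (hedged; class names are the usual transformers ones):
#   infer_framework(BertModel) -> "pt"
#   infer_framework(TFBertModel) -> "tf"
#   infer_framework(FlaxBertModel) -> "flax"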
| 121 |
def euclidean_gcd(a: int, b: int) -> int:
    # Iterative Euclid: gcd(a, b) == gcd(b, a mod b) until the remainder is 0.
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
print(f'''euclidean_gcd(3, 5) = {euclidean_gcd(3,5 )}''' )
print(f'''euclidean_gcd(5, 3) = {euclidean_gcd(5,3 )}''' )
print(f'''euclidean_gcd(1, 3) = {euclidean_gcd(1,3 )}''' )
print(f'''euclidean_gcd(3, 6) = {euclidean_gcd(3,6 )}''' )
print(f'''euclidean_gcd(6, 3) = {euclidean_gcd(6,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3,5 )}''' )
print(f'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5,3 )}''' )
print(f'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1,3 )}''' )
print(f'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3,6 )}''' )
print(f'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6,3 )}''' )
if __name__ == "__main__":
main()
| 26 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
snake_case__ : List[Any] = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
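# Illustrative sketch (not part of the original script): how the fused qkv
# slicing above carves one (3 * hidden, hidden) matrix into q/k/v blocks,
# using a toy hidden_size of 4 (`torch` is already imported at the top):
#
#   fused = torch.arange(12 * 4).reshape(12, 4)
#   q, k, v = fused[:4, :], fused[4:8, :], fused[8:, :]
#   q.shape == k.shape == v.shape == (4, 4)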
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 117 |
def is_power_of_two(number: int) -> bool:
    """
    Check whether `number` is a power of two via the n & (n - 1) bit trick.
    (Descriptive name chosen here; the original identifier was machine-mangled.)

    >>> [is_power_of_two(n) for n in (1, 2, 3, 4, 6, 8)]
    [True, True, False, True, False, True]
    """
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 26 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
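# `_LazyModule` defers the torch-dependent imports listed above until an
# attribute such as `TableTransformerModel` is first accessed, so importing
# the package itself stays cheap.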
| 242 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
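# Illustrative mappings produced by the renamer above (hedged examples,
# traced by hand through PATTERNS and the prefix rules):
#   "encoder.attention.q_lin.weight" -> "encoder.self_attn.q_proj.weight"
#   "decoder.norm3.bias"             -> "decoder.final_layer_norm.bias"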
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak the ParlAI weights into the Blenderbot structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 26 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = pop_node.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
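# Minimal usage sketch (illustrative, not part of the original module):
#
#   stack: LinkedStack[int] = LinkedStack()
#   stack.push(1); stack.push(2)
#   str(stack) == "2->1"; stack.pop() == 2; stack.peek() == 1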
if __name__ == "__main__":
from doctest import testmod
testmod()
| 333 |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(self, sql, con, features=None, cache_dir=None, keep_in_memory=False, **kwargs):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
class SqlDatasetWriter:
    def __init__(self, dataset, name, con, batch_size=None, num_proc=None, **to_sql_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating SQL from Arrow format", ):
                    written += num_rows
        return written
| 26 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
_a = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class A_ (unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = TOKEN
HfFolder.save_token(_a )
@classmethod
def UpperCamelCase__ ( cls ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : Dict = FlaxBertModel(_a )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(_a , repo_id="test-model-flax" , push_to_hub=_a , use_auth_token=self._token )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
UpperCAmelCase_ : str = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=F"""{key} not identical""" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase_ : List[str] = FlaxBertModel(_a )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
UpperCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
UpperCAmelCase_ : str = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Optional[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
_a , repo_id="valid_org/test-model-flax-org" , push_to_hub=_a , use_auth_token=self._token )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
UpperCAmelCase_ : List[Any] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase_ : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(_a , 1E-3 , msg=F"""{key} not identical""" )
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
UpperCAmelCase_ : Optional[int] = FlaxBertModel(_a )
UpperCAmelCase_ : List[str] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) )
with self.assertRaises(_a ):
UpperCAmelCase_ : int = FlaxBertModel.from_pretrained(_a )
UpperCAmelCase_ : Optional[Any] = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
UpperCAmelCase_ : Any = FlaxBertModel(_a )
UpperCAmelCase_ : Optional[Any] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(_a , _a ) , max_shard_size="10KB" )
with self.assertRaises(_a ):
UpperCAmelCase_ : Any = FlaxBertModel.from_pretrained(_a )
UpperCAmelCase_ : List[str] = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertTrue(check_models_equal(_a , _a ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = """bert"""
UpperCAmelCase_ : List[Any] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(_a ):
UpperCAmelCase_ : Dict = FlaxBertModel.from_pretrained(_a )
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = """bert"""
UpperCAmelCase_ : str = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(_a ):
UpperCAmelCase_ : Tuple = FlaxBertModel.from_pretrained(_a )
UpperCAmelCase_ : str = FlaxBertModel.from_pretrained(_a , subfolder=_a )
self.assertIsNotNone(_a )
| 61 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    model_type = "fnet"

    def __init__(self, vocab_size=32000, hidden_size=768, num_hidden_layers=12, intermediate_size=3072,
                 hidden_act="gelu_new", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=4,
                 initializer_range=0.02, layer_norm_eps=1e-12, use_tpu_fourier_optimizations=False,
                 tpu_short_seq_length=512, pad_token_id=3, bos_token_id=1, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
| 26 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("3.7"):
raise ImportWarning(
"To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
"If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 277 |
def harmonic_series(n_term: str) -> list:
    """
    >>> harmonic_series(3)
    ['1', '1/2', '1/3']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
_snake_case = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 26 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive flock on this file so output from concurrent ranks does not interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 313 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_feature_extractor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}" )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 26 | 0 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer

MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        s2s_model.load_state_dict(save_dict['model'])
        s2s_model = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0' )
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets', name='wiki40b_en_100_0')["train"]
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat', dtype='float32', mode='r', shape=(wiki40b_passages.num_rows, 128), )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5', name='LFQA_reddit')
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat', dtype='float32', mode='r', shape=(eli5_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
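# Illustrative sketch of the dense lookup above (hedged, standalone toy
# values): an IndexFlatIP scores stored vectors by inner product, so for
# unit-norm embeddings the first returned id is the nearest stored question.
#
#   demo = faiss.IndexFlatIP(2)
#   demo.add(np.eye(2, dtype="float32"))
#   scores, ids = demo.search(np.array([[1.0, 0.0]], dtype="float32"), 1)
#   ids[0][0] == 0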
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name='english_wiki40b_snippets_100w', n_results=n_results, )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device='cuda:0', )[0]
    # NOTE: `support_list` is read from module scope; it is assigned below in
    # the Streamlit UI section before this function is called.
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
header_html = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
header_full = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
demo_options = st.sidebar.checkbox('Demo options')
if demo_options:
    action_st = st.sidebar.selectbox(
'',
action_list,
index=3,
)
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
    show_passages = show_type == 'Show full text of passages'
else:
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
    retriever_info = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
    index_type = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
    max_len = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
_A : List[str] = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
_A : int = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
_A : List[Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
_A : List[str] = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
        wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
        sec_titles = res[1].strip()
        if sec_titles == "":
            sections = '[{}]({})'.format(res[0], wiki_url)
        else:
            sec_list = sec_titles.split(' & ')
            sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style=\"font-family:arial; font-size:10pt;\">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
    nn_train_list = find_nearest_training(question)
    train_exple = nn_train_list[0]
    st.markdown(
        '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
    )
    answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
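# The "dense" retriever described in the sidebar text ranks passages by max-inner-product
# search between question and passage embeddings. A minimal, self-contained sketch of that
# scoring step (NumPy only; the embedding matrix and query vector below are random
# stand-ins, not the app's real ELI5/Wiki40b embeddings):
#
#   import numpy as np
#
#   def mips_top_k(query_vec, passage_matrix, k=10):
#       """Indices of the k passages with the largest inner product with the query."""
#       scores = passage_matrix @ query_vec  # one dot product per passage
#       return np.argsort(-scores)[:k]
#
#   idx = mips_top_k(np.random.randn(128), np.random.randn(10000, 128), k=10)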
| 142 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
def a__ ( self ) -> Union[str, Any]:
pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
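# A quick usage sketch for the processor exercised above (values are illustrative and the
# expected shape mirrors the assertions in these tests; requires Pillow):
#
#   from PIL import Image
#   from transformers import DonutImageProcessor
#
#   processor = DonutImageProcessor(size={"height": 18, "width": 20})
#   pixel_values = processor(images=Image.new("RGB", (30, 40)), return_tensors="pt").pixel_values
#   # -> pixel_values.shape == (1, 3, 18, 20)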
| 26 | 0 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
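# Worked example of the mapping above (the key is illustrative, not read from a real
# checkpoint): PATTERNS first turns "attention" -> "attn" and "q_lin" -> "q_proj", then
# the encoder branch turns ".attn" -> ".self_attn", so
#   rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   == "encoder.layers.0.self_attn.q_proj.weight"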
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI Blenderbot checkpoint into the HF Blenderbot layout."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 337 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    # compare the input against 0 and return the element-wise maxima
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
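# ReLU is piecewise linear, so its (sub)gradient is a simple indicator function; a minimal
# companion sketch, not part of the original module:
def relu_derivative(vector):
    """1.0 where the input is positive, 0.0 elsewhere (the subgradient 0 is chosen at 0)."""
    return (np.array(vector) > 0).astype(float)
# print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]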
| 26 | 0 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
lowerCamelCase__ =42
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = layers_per_block
SCREAMING_SNAKE_CASE : Optional[Any] = torch.nn.Convad(
_a , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : str = None
SCREAMING_SNAKE_CASE : int = nn.ModuleList([] )
# down
SCREAMING_SNAKE_CASE : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(_a ):
SCREAMING_SNAKE_CASE : int = output_channel
SCREAMING_SNAKE_CASE : List[str] = block_out_channels[i]
SCREAMING_SNAKE_CASE : Optional[Any] = i == len(_a ) - 1
SCREAMING_SNAKE_CASE : Tuple = get_down_block(
_a , num_layers=self.layers_per_block , in_channels=_a , out_channels=_a , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , )
self.down_blocks.append(_a )
# mid
SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# out
SCREAMING_SNAKE_CASE : Any = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_a , eps=1e-6 )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.SiLU()
SCREAMING_SNAKE_CASE : List[str] = 2 * out_channels if double_z else out_channels
SCREAMING_SNAKE_CASE : List[Any] = nn.Convad(block_out_channels[-1] , _a , 3 , padding=1 )
SCREAMING_SNAKE_CASE : Any = False
def __UpperCamelCase ( self : Dict , a : Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = x
SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_in(_a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a : Any ):
def custom_forward(*a : List[str] ):
return module(*_a )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , use_reentrant=_a )
# middle
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , use_reentrant=_a )
else:
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a )
# middle
SCREAMING_SNAKE_CASE : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _a )
else:
# down
for down_block in self.down_blocks:
SCREAMING_SNAKE_CASE : List[Any] = down_block(_a )
# middle
SCREAMING_SNAKE_CASE : Dict = self.mid_block(_a )
# post-process
SCREAMING_SNAKE_CASE : Union[str, Any] = self.conv_norm_out(_a )
SCREAMING_SNAKE_CASE : List[str] = self.conv_act(_a )
SCREAMING_SNAKE_CASE : str = self.conv_out(_a )
return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : Tuple = layers_per_block
SCREAMING_SNAKE_CASE : Any = nn.Convad(
_a , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : str = nn.ModuleList([] )
SCREAMING_SNAKE_CASE : Optional[int] = in_channels if norm_type == """spatial""" else None
# mid
SCREAMING_SNAKE_CASE : List[str] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_a , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_a , temb_channels=_a , )
# up
SCREAMING_SNAKE_CASE : List[str] = list(reversed(_a ) )
SCREAMING_SNAKE_CASE : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_a ):
SCREAMING_SNAKE_CASE : Any = output_channel
SCREAMING_SNAKE_CASE : List[str] = reversed_block_out_channels[i]
SCREAMING_SNAKE_CASE : Optional[Any] = i == len(_a ) - 1
SCREAMING_SNAKE_CASE : List[str] = get_up_block(
_a , num_layers=self.layers_per_block + 1 , in_channels=_a , out_channels=_a , prev_output_channel=_a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_a , resnet_groups=_a , attention_head_dim=_a , temb_channels=_a , resnet_time_scale_shift=_a , )
self.up_blocks.append(_a )
SCREAMING_SNAKE_CASE : Optional[int] = output_channel
# out
if norm_type == "spatial":
SCREAMING_SNAKE_CASE : Dict = SpatialNorm(block_out_channels[0] , _a )
else:
SCREAMING_SNAKE_CASE : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_a , eps=1e-6 )
SCREAMING_SNAKE_CASE : List[str] = nn.SiLU()
SCREAMING_SNAKE_CASE : str = nn.Convad(block_out_channels[0] , _a , 3 , padding=1 )
SCREAMING_SNAKE_CASE : int = False
def __UpperCamelCase ( self : List[str] , a : List[str] , a : Any=None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = z
SCREAMING_SNAKE_CASE : Dict = self.conv_in(_a )
SCREAMING_SNAKE_CASE : str = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a : Optional[Any] ):
def custom_forward(*a : Optional[int] ):
return module(*_a )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
SCREAMING_SNAKE_CASE : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a , use_reentrant=_a )
SCREAMING_SNAKE_CASE : Dict = sample.to(_a )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Tuple = torch.utils.checkpoint.checkpoint(
create_custom_forward(_a ) , _a , _a , use_reentrant=_a )
else:
# middle
SCREAMING_SNAKE_CASE : Optional[int] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _a , _a )
SCREAMING_SNAKE_CASE : Optional[int] = sample.to(_a )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(_a ) , _a , _a )
else:
# middle
SCREAMING_SNAKE_CASE : Tuple = self.mid_block(_a , _a )
SCREAMING_SNAKE_CASE : Dict = sample.to(_a )
# up
for up_block in self.up_blocks:
SCREAMING_SNAKE_CASE : Dict = up_block(_a , _a )
# post-process
if latent_embeds is None:
SCREAMING_SNAKE_CASE : List[Any] = self.conv_norm_out(_a )
else:
SCREAMING_SNAKE_CASE : int = self.conv_norm_out(_a , _a )
SCREAMING_SNAKE_CASE : Tuple = self.conv_act(_a )
SCREAMING_SNAKE_CASE : Tuple = self.conv_out(_a )
return sample
class VectorQuantizer(nn.Module):
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE : List[str] = n_e
SCREAMING_SNAKE_CASE : List[str] = vq_embed_dim
SCREAMING_SNAKE_CASE : int = beta
SCREAMING_SNAKE_CASE : Optional[Any] = legacy
SCREAMING_SNAKE_CASE : int = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
SCREAMING_SNAKE_CASE : Tuple = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
SCREAMING_SNAKE_CASE : Optional[Any] = self.used.shape[0]
SCREAMING_SNAKE_CASE : Dict = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
SCREAMING_SNAKE_CASE : List[str] = self.re_embed
SCREAMING_SNAKE_CASE : str = self.re_embed + 1
print(
F"Remapping {self.n_e} indices to {self.re_embed} indices. "
F"Using {self.unknown_index} for unknown indices." )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = n_e
SCREAMING_SNAKE_CASE : str = sane_index_shape
def __UpperCamelCase ( self : int , a : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = inds.shape
assert len(_a ) > 1
SCREAMING_SNAKE_CASE : Dict = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : Any = self.used.to(_a )
SCREAMING_SNAKE_CASE : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
SCREAMING_SNAKE_CASE : Optional[int] = match.argmax(-1 )
SCREAMING_SNAKE_CASE : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
SCREAMING_SNAKE_CASE : Dict = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
SCREAMING_SNAKE_CASE : Tuple = self.unknown_index
return new.reshape(_a )
def __UpperCamelCase ( self : int , a : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = inds.shape
assert len(_a ) > 1
SCREAMING_SNAKE_CASE : Union[str, Any] = inds.reshape(ishape[0] , -1 )
SCREAMING_SNAKE_CASE : List[Any] = self.used.to(_a )
if self.re_embed > self.used.shape[0]: # extra token
SCREAMING_SNAKE_CASE : Union[str, Any] = 0 # simply set to zero
SCREAMING_SNAKE_CASE : int = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _a )
return back.reshape(_a )
def __UpperCamelCase ( self : Dict , a : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = z.permute(0 , 2 , 3 , 1 ).contiguous()
SCREAMING_SNAKE_CASE : List[str] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
SCREAMING_SNAKE_CASE : Tuple = torch.argmin(torch.cdist(_a , self.embedding.weight ) , dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = self.embedding(_a ).view(z.shape )
SCREAMING_SNAKE_CASE : Optional[int] = None
SCREAMING_SNAKE_CASE : int = None
# compute loss for embedding
if not self.legacy:
SCREAMING_SNAKE_CASE : Any = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
SCREAMING_SNAKE_CASE : int = z + (z_q - z).detach()
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
SCREAMING_SNAKE_CASE : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : List[Any] = self.remap_to_used(_a )
SCREAMING_SNAKE_CASE : str = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
SCREAMING_SNAKE_CASE : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __UpperCamelCase ( self : Tuple , a : int , a : Tuple ) -> Dict:
"""simple docstring"""
if self.remap is not None:
SCREAMING_SNAKE_CASE : Optional[int] = indices.reshape(shape[0] , -1 ) # add batch axis
SCREAMING_SNAKE_CASE : str = self.unmap_to_all(_a )
SCREAMING_SNAKE_CASE : Optional[Any] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
SCREAMING_SNAKE_CASE : Optional[int] = self.embedding(_a )
if shape is not None:
SCREAMING_SNAKE_CASE : Tuple = z_q.view(_a )
# reshape back to match original input shape
SCREAMING_SNAKE_CASE : str = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
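# Standalone sketch of the nearest-codebook lookup performed in the forward pass above
# (shapes are illustrative): torch.cdist gives pairwise Euclidean distances, matching the
# (z - e)^2 = z^2 + e^2 - 2 e * z expansion noted in the comment, and argmin over dim=1
# picks each vector's closest code:
#
#   z_flattened = torch.randn(16, 64)            # (N, D) continuous vectors
#   codebook = torch.nn.Embedding(512, 64)       # (n_e, D) codebook
#   indices = torch.argmin(torch.cdist(z_flattened, codebook.weight), dim=1)
#   z_q = codebook(indices)                      # quantized vectors, shape (16, 64)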
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)
    def sample(self, generator=None) -> torch.FloatTensor:
        # sample on the same device and with the same dtype as the parameters
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x
    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )
    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)
    def mode(self):
        return self.mean
| 76 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(data_dir, save_dir, model_name, bs=8, max_source_length=1024, type_path="val", n_obs=None, fpaa=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs=None, prefix="", **generate_kwargs):
_A : Dict = str(snake_case_ )
assert local_rank is not None
torch.distributed.init_process_group(backend="""nccl""",rank=snake_case_ )
_A : Tuple = Path(snake_case_ )
_A : List[Any] = save_dir.joinpath(f'''rank_{local_rank}_output.json''' )
torch.cuda.set_device(snake_case_ )
_A : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(snake_case_ ).cuda()
if fpaa:
_A : Any = model.half()
# determine if we need to increase num_beams
use_task_specific_params(snake_case_,snake_case_ ) # update config with task specific params
_A : str = generate_kwargs.pop("""num_beams""",model.config.num_beams ) # AttributeError risk?
if num_return_sequences > num_beams:
_A : int = num_return_sequences
_A : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
if max_source_length is None:
_A : Optional[int] = tokenizer.model_max_length
if prefix is None:
_A : Tuple = prefix or getattr(model.config,"""prefix""","""""" ) or """"""
_A : Optional[int] = SeqaSeqDataset(
snake_case_,snake_case_,snake_case_,max_target_length=1024,type_path=snake_case_,n_obs=snake_case_,prefix=snake_case_,**snake_case_,)
# I set shuffle=True for a more accurate progress bar.
# If all the longest samples are first, the prog bar estimate is too high at the beginning.
_A : Optional[int] = ds.make_sortish_sampler(snake_case_,distributed=snake_case_,add_extra_examples=snake_case_,shuffle=snake_case_ )
_A : Dict = DataLoader(snake_case_,sampler=snake_case_,batch_size=snake_case_,collate_fn=ds.collate_fn )
_A : Optional[Any] = []
for batch in tqdm(snake_case_ ):
_A : Tuple = model.generate(
input_ids=batch["""input_ids"""].to(model.device ),attention_mask=batch["""attention_mask"""].to(model.device ),num_return_sequences=snake_case_,num_beams=snake_case_,**snake_case_,)
_A : Any = tokenizer.batch_decode(snake_case_,skip_special_tokens=snake_case_,clean_up_tokenization_spaces=snake_case_ )
_A : Dict = batch["""ids"""]
if num_return_sequences > 1:
_A : Any = chunks(snake_case_,snake_case_ ) # batch size chunks, each of size num_return_seq
for i, pred in enumerate(snake_case_ ):
results.append({"""pred""": pred, """id""": ids[i].item()} )
save_json(snake_case_,snake_case_ )
return results, sampler.num_replicas
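# `chunks` (imported from the local `utils` module above) regroups the flat list of
# num_return_sequences predictions per input into per-example lists. A minimal sketch of
# the behavior this script relies on -- an assumed shape, not the canonical utils code:
def _chunks_sketch(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]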
def run_generate():
_A : Tuple = argparse.ArgumentParser(
epilog="""Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate""" )
parser.add_argument("""--data_dir""",type=snake_case_,help="""like cnn_dm/test.source""" )
parser.add_argument(
"""--model_name""",type=snake_case_,help="""like facebook/bart-large-cnn,t5-base, etc.""",default="""sshleifer/distilbart-xsum-12-3""",)
parser.add_argument("""--save_dir""",type=snake_case_,help="""where to save""",default="""tmp_gen""" )
parser.add_argument("""--max_source_length""",type=snake_case_,default=snake_case_ )
parser.add_argument(
"""--type_path""",type=snake_case_,default="""test""",help="""which subset to evaluate typically train/val/test""" )
parser.add_argument("""--task""",type=snake_case_,default="""summarization""",help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""",type=snake_case_,default=8,required=snake_case_,help="""batch size""" )
parser.add_argument(
"""--local_rank""",type=snake_case_,default=-1,required=snake_case_,help="""should be passed by distributed.launch""" )
parser.add_argument(
"""--n_obs""",type=snake_case_,default=snake_case_,required=snake_case_,help="""How many observations. Defaults to all.""" )
parser.add_argument(
"""--num_return_sequences""",type=snake_case_,default=1,required=snake_case_,help="""How many sequences to return""" )
parser.add_argument(
"""--sync_timeout""",type=snake_case_,default=600,required=snake_case_,help="""How long should master process wait for other processes to finish.""",)
parser.add_argument("""--src_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument("""--tgt_lang""",type=snake_case_,default=snake_case_,required=snake_case_ )
parser.add_argument(
"""--prefix""",type=snake_case_,required=snake_case_,default=snake_case_,help="""will be added to the begininng of src examples""" )
parser.add_argument("""--fp16""",action="""store_true""" )
parser.add_argument("""--debug""",action="""store_true""" )
_A : Union[str, Any] = time.time()
_A , _A : List[str] = parser.parse_known_args()
_A : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case_ )
if generate_kwargs and args.local_rank <= 0:
print(f'''parsed the following generate kwargs: {generate_kwargs}''' )
_A : Dict = Path(args.save_dir + """_tmp""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) # this handles locking.
_A : int = list(json_save_dir.glob("""rank_*.json""" ) )
if intermediate_files:
raise ValueError(f'''Found files at {json_save_dir} please move or remove them.''' )
# In theory, a node could finish and save before another node hits this. If this happens, we can address later.
_A : Any = {}
if args.src_lang is not None:
_A : int = args.src_lang
if args.tgt_lang is not None:
_A : Dict = args.tgt_lang
Path(args.save_dir ).mkdir(exist_ok=snake_case_ )
_A , _A : str = eval_data_dir(
args.data_dir,snake_case_,args.model_name,type_path=args.type_path,bs=args.bs,fpaa=args.fpaa,task=args.task,local_rank=args.local_rank,n_obs=args.n_obs,max_source_length=args.max_source_length,num_return_sequences=args.num_return_sequences,prefix=args.prefix,dataset_kwargs=snake_case_,**snake_case_,)
if args.local_rank <= 0:
_A : List[Any] = Path(args.save_dir )
save_dir.mkdir(exist_ok=snake_case_ )
_A : Tuple = gather_results_from_each_node(snake_case_,snake_case_,args.sync_timeout )
_A : Optional[int] = combine_partial_results(snake_case_ )
if args.num_return_sequences > 1:
_A : Optional[Any] = save_dir.joinpath("""pseudolabel_results.json""" )
print(f'''Saving aggregated results at {save_path}, intermediate in {json_save_dir}/''' )
save_json(snake_case_,snake_case_ )
return
_A : List[str] = Path(args.data_dir ).joinpath(args.type_path + """.target""" )
with open(snake_case_ ) as f:
_A : int = [x.rstrip() for x in f.readlines()][: len(snake_case_ )]
# Calculate metrics, save metrics, and save _generations.txt
_A : Dict = """translation""" in args.task
_A : Optional[Any] = calculate_bleu if calc_bleu else calculate_rouge
_A : Tuple = """bleu""" if calc_bleu else """rouge"""
_A : Dict = score_fn(snake_case_,snake_case_ )
_A : List[Any] = len(snake_case_ )
_A : Optional[int] = time.time() - start_time
_A : Dict = round(runtime / metrics["""n_obs"""],4 )
_A : Dict = num_replicas
# TODO(@stas00): add whatever metadata to metrics
_A : Any = save_dir.joinpath(f'''{args.type_path}_{metric_name}.json''' )
save_json(snake_case_,snake_case_,indent=snake_case_ )
print(snake_case_ )
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}_generations.txt''' ) )
if args.debug:
write_txt_file(snake_case_,save_dir.joinpath(f'''{args.type_path}.target''' ) )
else:
shutil.rmtree(snake_case_ )
def combine_partial_results(partial_results):
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout):
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
| 26 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # The fused (3 * dim, dim) qkv projection splits row-wise into q/k/v;
            # the target key names below follow the HF ViT layout.
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
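# Shape sanity-check for the qkv split above (illustrative tensors, not real checkpoint
# weights): timm-style MAE stores a fused qkv projection of shape (3 * dim, dim), and the
# three row-blocks become query/key/value:
#
#   import torch
#   dim = 4
#   val = torch.randn(3 * dim, dim)
#   q, k, v = val[:dim, :], val[dim : dim * 2, :], val[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v]), val)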
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
    model = ViTMAEForPreTraining(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)
    model.load_state_dict(new_state_dict)
    model.eval()
    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])
    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 272 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 26 | 0 |
import requests
def send_slack_message(message_body, slack_url):
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={'text': message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
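# Note: requests.post blocks indefinitely by default; when calling this from CI or other
# automation it is safer to bound the request. A hedged variant of the call above (the
# timeout value is illustrative):
#
#   response = requests.post(slack_url, json={"text": message_body}, headers=headers, timeout=10)
#   response.raise_for_status()  # raises requests.HTTPError instead of the manual ValueError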
| 121 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n"
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[List[List[PIL.Image.Image]], List[List[np.ndarray]]]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer):
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer, )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)
        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)
        image = image.to(dtype=self.image_encoder.dtype, device=device)
        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None, guidance_scale=4.0, frame_size=64, output_type="pil", return_dict=True, ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)
        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2)  # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)
        images = []
        for i, latent in enumerate(latents):
            print()
            image = self.renderer.decode(
                latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, )
            images.append(image)
        images = torch.stack(images)
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]
        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images)
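# The guidance step inside the denoising loop above is standard classifier-free guidance.
# As a standalone sketch (the tensors are stand-ins for the prior's unconditional and
# conditional predictions, not real model outputs):
#
#   import torch
#   noise_pred_uncond = torch.zeros(2, 8)
#   noise_pred_cond = torch.ones(2, 8)
#   guidance_scale = 4.0
#   guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)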
| 26 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
snake_case__ : Tuple = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
                'See https://pypi.org/project/jieba/ for installation.')
        self.jieba = jieba
        self.translator = str.maketrans(' \n', '\u2582\u2583')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _lowerCAmelCase (self :Tuple )-> List[Any]:
return len(self.sp_model )
def _lowerCAmelCase (self :Optional[int] )-> Tuple:
__A = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self :Dict )-> Tuple:
__A = self.__dict__.copy()
__A = None
return state
def __setstate__(self :List[str] , _UpperCamelCase :List[str] )-> Union[str, Any]:
__A = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__A = {}
__A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text):
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
def _lowerCAmelCase (self :str , _UpperCamelCase :Tuple )-> Union[str, Any]:
return self.sp_model.PieceToId(_a )
def _lowerCAmelCase (self :int , _UpperCamelCase :Optional[int] )-> Optional[Any]:
return self.sp_model.IdToPiece(_a )
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :Optional[int] )-> int:
__A = """""".join(_a ).replace(_a , ''' ''' ).strip()
return out_string
def _lowerCAmelCase (self :str , _UpperCamelCase :Tuple , _UpperCamelCase :Dict = None )-> List[int]:
__A = [self.sep_token_id]
__A = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase (self :Union[str, Any] , _UpperCamelCase :Optional[int] , _UpperCamelCase :List[str] = None , _UpperCamelCase :Dict = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
return ([0] * len(_a )) + [1, 1]
def _lowerCAmelCase (self :List[Any] , _UpperCamelCase :Optional[Any] , _UpperCamelCase :Tuple = None )-> List[int]:
__A = [self.sep_token_id]
__A = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :str , _UpperCamelCase :Optional[Any] = None )-> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__A = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
__A = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text
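# The translation tables above exist because jieba's output is re-joined with spaces before
# SentencePiece sees it: real spaces/newlines are first mapped to U+2582/U+2583 and restored
# in _decode. A sketch of the round trip (strings are illustrative):
#
#   table = str.maketrans(' \n', '\u2582\u2583')
#   encoded = 'new text'.translate(table)        # 'new\u2582text'
#   decoded = encoded.replace('\u2582', ' ')     # 'new text'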
| 117 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file( config_path ):
    print("""Loading config file...""" )
    def flatten_yaml_as_dict(d,parent_key="",sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v,collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v,new_key,sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(config_path,"""r""" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file,Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config,k,v )
        except yaml.YAMLError as exc:
            logger.error("""Error while loading config file: {}. Error message: {}""".format(config_path,str(exc ) ) )
    return config
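# Illustrative sketch (not in the original script): how the nested YAML config
# collapses into dotted keys before being copied onto the Namespace. The toy
# dict below is made up for demonstration.
def _demo_flatten():
    import collections.abc
    def flatten(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)
    cfg = {"model": {"classification": {"name": "mobilevit_v2"}}}
    assert flatten(cfg) == {"model.classification.name": "mobilevit_v2"}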
def get_mobilevitva_config( task_name,orig_cfg_file ):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("""imagenet1k_""" ):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_""" ):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config,"""model.classification.name""",-1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config,"""model.classification.mitv2.width_multiplier""",1.0 )
    assert (
        getattr(orig_config,"""model.classification.mitv2.attn_norm_layer""",-1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config,"""model.classification.activation.name""","""swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config,"""model.segmentation.output_stride""",16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config,"""model.segmentation.deeplabv3.aspp_rates""",[12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config,"""model.segmentation.deeplabv3.aspp_out_channels""",512 )
            config.aspp_dropout_prob = getattr(orig_config,"""model.segmentation.deeplabv3.aspp_dropout""",0.1 )
    # id2label
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id,filename,repo_type="""dataset""" ),"""r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key( dct,old,new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys( state_dict,base_model=False ):
    if base_model:
        model_prefix = """"""
    else:
        model_prefix = """mobilevitv2."""
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(""".block.""",""".""" )
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""",""".convolution.""" )
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""",""".normalization.""" )
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""",f'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''',f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""",""".expand_1x1.""" )
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""",""".reduce_1x1.""" )
        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''',f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''',f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''',f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''',f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j+1}.''',f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''',f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""","""layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""","""attention.""" )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""","""layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""","""ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""","""ffn.conv2.""" )
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""","""classifier.""" )
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""","""segmentation_head.""" )
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""",""".""" )
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""",""".""" )
        rename_keys.append((k, k_new) )
    return rename_keys
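# Illustrative sketch (not in the original script): the (old, new) pairs that
# create_rename_keys() produces are consumed by rename_key(), which moves each
# tensor to its new name in place. Toy state dict for demonstration only:
def _demo_rename():
    state = {"conv_1.weight": 1, "layer_1.bias": 2}
    pairs = [("conv_1.weight", "mobilevitv2.conv_stem.weight"),
             ("layer_1.bias", "mobilevitv2.encoder.layer.0.layer.bias")]
    for old, new in pairs:
        state[new] = state.pop(old)
    assert "conv_1.weight" not in state
    assert state["mobilevitv2.conv_stem.weight"] == 1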
def remove_unused_keys( state_dict ):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k,None )
def prepare_img( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url,stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint( task_name,checkpoint_path,orig_config_path,pytorch_dump_folder_path ):
    config = get_mobilevitva_config(task_name,orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path,map_location="""cpu""" )
    # load huggingface model
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict,base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict,rename_key_src,rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size,size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img(),return_tensors="""pt""" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""",model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
            assert torch.allclose(logits[0, :3],expected_logits,atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
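    # Example invocation (script name and paths are hypothetical, shown for
    # illustration only):
    #
    #   python convert_mobilevitv2.py \
    #       --task imagenet1k_256 \
    #       --orig_checkpoint_path /path/to/mobilevitv2-1.0.pt \
    #       --orig_config_path /path/to/mobilevitv2.yaml \
    #       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k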
| 26 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowerCamelCase ( OnnxPipelineTesterMixin , unittest.TestCase ):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs ( self , seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """strength""": 0.75,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs
    def test_pipeline_default_ddim ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.6_9643, 0.5_8484, 0.5_0314, 0.5_8760, 0.5_5368, 0.5_9643, 0.5_1529, 0.4_1217, 0.4_9087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.6_1737, 0.5_4642, 0.5_3183, 0.5_4465, 0.5_2742, 0.6_0525, 0.4_9969, 0.4_0655, 0.4_8154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_lms ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.5_2761, 0.5_9977, 0.4_9033, 0.4_9619, 0.5_4282, 0.5_0311, 0.4_7600, 0.4_0918, 0.4_5203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.5_2911, 0.6_0004, 0.4_9229, 0.4_9805, 0.5_4502, 0.5_0680, 0.4_7777, 0.4_1028, 0.4_5304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep ( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.6_5331, 0.5_8277, 0.4_8204, 0.5_6059, 0.5_3665, 0.5_6235, 0.5_0969, 0.4_0009, 0.4_6552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowerCamelCaseIntegration ( unittest.TestCase ):
    @property
    def gpu_provider ( self ):
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options ( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm ( self ):
        """simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms ( self ):
        """simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((7_68, 5_12) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 242 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowercase ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 1_0
    def get_scheduler_config ( self , **kwargs ):
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs )
        return config
    def test_timesteps ( self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas ( self ):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules ( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
    def test_full_loop_with_v_prediction ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
    def test_full_loop_device ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
    def test_full_loop_device_karras_sigmas ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
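# Illustrative sketch (not part of the test file): the `use_karras_sigmas=True`
# branch exercised above swaps the linear sigma grid for the Karras et al.
# (2022) schedule. A minimal numpy version of that formula; the parameter
# values below are assumptions chosen for illustration (rho=7 as in the paper):
def _demo_karras_sigmas(sigma_min=0.1, sigma_max=10.0, n=5, rho=7.0):
    import numpy as np
    ramp = np.linspace(0, 1, n)
    min_inv_rho = sigma_min ** (1 / rho)
    max_inv_rho = sigma_max ** (1 / rho)
    # interpolate in sigma^(1/rho) space, then raise back to the rho power
    return (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho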
| 26 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector ) -> float:
    '''simple docstring'''
    return np.dot(vector , vector )
class A_ :
'''simple docstring'''
    def __init__(self , *,
        regularization = np.inf , kernel = "linear" , gamma = 0.0 , ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg )
    def __linear(self , vectora , vectorb ) -> float:
        return np.dot(vectora , vectorb )
    def __rbf(self , vectora , vectorb ) -> float:
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit(self , observations , classes ) -> None:
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        (n,) = np.shape(classes )
        def to_minimize(candidate ) -> float:
            s = 0
            (n,) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict(self , observation ) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
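# Illustrative sketch (not part of the original module): the RBF kernel used
# above, K(x, y) = exp(-gamma * ||x - y||^2), computed for a toy pair of
# vectors with a made-up gamma:
def _demo_rbf_kernel():
    gamma = 0.5  # assumed value, for illustration only
    x = np.array([1.0, 0.0])
    y = np.array([0.0, 1.0])
    k = np.exp(-gamma * np.dot(x - y, x - y))
    # ||x - y||^2 == 2, so K == exp(-0.5 * 2) == exp(-1)
    assert abs(k - np.exp(-1.0)) < 1e-12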
| 333 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowercase ( SchedulerMixin,ConfigMixin ):
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ) -> None:
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None
    def set_timesteps ( self , num_inference_steps , device = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred ( self , score , x , t , generator=None ):
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ) -> Optional[Any]:
return self.config.num_train_timesteps
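# Illustrative sketch (not part of the scheduler file): step_pred above is one
# Euler-Maruyama step of the reverse-time VP-SDE,
#   x_{t+dt} = x_t + f(x, t) * dt + g(t) * sqrt(|dt|) * z,  z ~ N(0, I).
# A scalar toy version with made-up drift/diffusion values:
def _demo_euler_maruyama_step():
    import math
    import torch
    x = torch.tensor(1.0)
    drift, diffusion, dt = torch.tensor(-0.5), torch.tensor(0.3), -0.01
    noise = torch.randn(())
    x_mean = x + drift * dt  # deterministic part of the update
    x_next = x_mean + diffusion * math.sqrt(-dt) * noise  # stochastic part
    return x_next, x_mean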
| 26 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': init_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_inpaint ( self ):
        '''simple docstring'''
        device = 'cpu' # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
    def test_inference_batch_single_identical ( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCaseSlow ( unittest.TestCase ):
    def tearDown ( self ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline ( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16 ( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading ( self ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pndm = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=pndm , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 27 |
'''simple docstring'''
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def config (*args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer (*args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model (*args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM (*args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM (*args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification (*args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering (*args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
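# Illustrative sketch (not part of the original file): `add_start_docstrings`
# prepends the given docstrings to the decorated function, so each entry point
# above inherits the documentation of the corresponding Auto class. A minimal
# reimplementation of the same idea, with hypothetical names:
def _demo_add_start_docstrings(*docstr):
    def decorator(fn):
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return decorator

@_demo_add_start_docstrings("Loads a config. ")
def _demo_entry_point():
    """Extra notes."""

assert _demo_entry_point.__doc__.startswith("Loads a config. ")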
| 27 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
    def check_results_dict_not_empty ( self , results ):
        '''simple docstring'''
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
                result = model_result['result'][batch_size][sequence_length]
                self.assertIsNotNone(result )
    def test_inference_no_configs_eager ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_only_pretrain ( self ):
        '''simple docstring'''
        MODEL_ID = 'sgugger/tiny-distilbert-classification'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , only_pretrain_model=True , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_no_configs_graph ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_eager ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_inference_with_configs_graph ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_train_no_configs ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_train_with_configs ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=True , inference=False , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , [config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )
    def test_inference_encoder_decoder_with_configs ( self ):
        '''simple docstring'''
        MODEL_ID = 'patrickvonplaten/t5-tiny-random'
        config = AutoConfig.from_pretrained(MODEL_ID )
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args , configs=[config] )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
    def test_inference_no_configs_xla ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=False , inference=True , sequence_lengths=[8] , batch_sizes=[1] , use_xla=True , multi_process=False , )
        benchmark = TensorFlowBenchmark(benchmark_args )
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )
    def test_save_csv_files ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , save_to_csv=True , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(tmp_dir , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(tmp_dir , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(tmp_dir , 'env.csv' ) , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_time.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'inf_mem.csv' ) ).exists() )
            self.assertTrue(Path(os.path.join(tmp_dir , 'env.csv' ) ).exists() )
    def test_trace_memory ( self ):
        '''simple docstring'''
        MODEL_ID = 'sshleifer/tiny-gpt2'
        def _check_summary_is_not_empty(summary ):
            self.assertTrue(hasattr(summary , 'sequential' ) )
            self.assertTrue(hasattr(summary , 'cumulative' ) )
            self.assertTrue(hasattr(summary , 'current' ) )
            self.assertTrue(hasattr(summary , 'total' ) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=True , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(tmp_dir , 'log.txt' ) , log_print=True , trace_memory_line_by_line=True , eager_mode=True , multi_process=False , )
            benchmark = TensorFlowBenchmark(benchmark_args )
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(tmp_dir , 'log.txt' ) ).exists() )
| 27 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class __UpperCamelCase ( unittest.TestCase ):
    def setUp ( self ):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
        from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
        self.test_metrics = test_metrics
    @require_cpu
    def test_metric_cpu_noop ( self ):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main , num_processes=1 )
    @require_cpu
    def test_metric_cpu_multi ( self ):
        '''simple docstring'''
        debug_launcher(self.test_metrics.main )
    @require_single_gpu
    def test_metric_gpu ( self ):
        '''simple docstring'''
        self.test_metrics.main()
    @require_multi_gpu
    def test_metric_gpu_multi ( self ):
        '''simple docstring'''
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
| 27 | 1 |
'''simple docstring'''
from typing import Any
def viterbi (observations_space : list , states_space : list , initial_probabilities : dict , transition_probabilities : dict , emission_probabilities : dict , ):
    _validation(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    # Creates data structures and fill initial step
    probabilities : dict = {}
    pointers : dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1 , len(observations_space ) ):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ''
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space ) - 1]
    # argmax for given final observation
    arg_max = ''
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space ) - 1 , -1 , -1 ):
        result.append(previous )
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
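# Illustrative sketch (not part of the original module): a classic toy HMM with
# two hidden states and made-up probabilities, run through the decoder above.
# With these numbers the most likely path is known in advance.
def _demo_viterbi():
    observations = ['normal', 'cold', 'dizzy']
    states = ['Healthy', 'Fever']
    initial = {'Healthy': 0.6, 'Fever': 0.4}
    transition = {
        'Healthy': {'Healthy': 0.7, 'Fever': 0.3},
        'Fever': {'Healthy': 0.4, 'Fever': 0.6},
    }
    emission = {
        'Healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
        'Fever': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6},
    }
    assert viterbi(observations, states, initial, transition, emission) == [
        'Healthy', 'Healthy', 'Fever'
    ]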
def _validation (observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_not_empty(
        observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , )
    _validate_lists(observations_space , states_space )
    _validate_dicts(
        initial_probabilities , transition_probabilities , emission_probabilities )
def _validate_not_empty (observations_space , states_space , initial_probabilities , transition_probabilities , emission_probabilities , ):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ] ):
        raise ValueError('There\'s an empty parameter' )
def _validate_lists (observations_space , states_space ):
    _validate_list(observations_space , 'observations_space' )
    _validate_list(states_space , 'states_space' )
def _validate_list (_object , var_name : str ):
    if not isinstance(_object , list ):
        msg = F"""{var_name} must be a list"""
        raise ValueError(msg )
    else:
        for x in _object:
            if not isinstance(x , str ):
                msg = F"""{var_name} must be a list of strings"""
                raise ValueError(msg )
def _validate_dicts (initial_probabilities , transition_probabilities , emission_probabilities , ):
    _validate_dict(initial_probabilities , 'initial_probabilities' , float )
    _validate_nested_dict(transition_probabilities , 'transition_probabilities' )
    _validate_nested_dict(emission_probabilities , 'emission_probabilities' )
def _validate_nested_dict (_object , var_name : str ):
    _validate_dict(_object , var_name , dict )
    for x in _object.values():
        _validate_dict(x , var_name , float , True )
def _validate_dict (_object , var_name : str , value_type : type , nested : bool = False ):
    if not isinstance(_object , dict ):
        msg = F"""{var_name} must be a dict"""
        raise ValueError(msg )
    if not all(isinstance(x , str ) for x in _object ):
        msg = F"""{var_name} all keys must be strings"""
        raise ValueError(msg )
    if not all(isinstance(x , value_type ) for x in _object.values() ):
        nested_text = 'nested dictionary ' if nested else ''
        msg = F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
        raise ValueError(msg )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 27 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file (tmp_path ):
    filename = tmp_path / 'file.csv'
    data = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def malformed_csv_file (tmp_path ):
    filename = tmp_path / 'malformed_file.csv'
    data = textwrap.dedent(
        '\\n header1,header2\n 1,2\n 10,20,\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_image (tmp_path , image_file ):
    filename = tmp_path / 'csv_with_image.csv'
    data = textwrap.dedent(
        F"""\
image
{image_file}
""" )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_label (tmp_path ):
    filename = tmp_path / 'csv_with_label.csv'
    data = textwrap.dedent(
        '\\n label\n good\n bad\n good\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
@pytest.fixture
def csv_file_with_int_list (tmp_path ):
    filename = tmp_path / 'csv_with_int_list.csv'
    data = textwrap.dedent(
        '\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n ' )
    with open(filename , 'w' ) as f:
        f.write(data )
    return str(filename )
def test_csv_generate_tables_raises_error_with_malformed_csv (csv_file , malformed_csv_file , caplog ):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]] )
    with pytest.raises(ValueError , match='Error tokenizing data' ):
        for _ in generator:
            pass
    assert any(
        record.levelname == 'ERROR'
        and 'Failed to read file' in record.message
        and os.path.basename(malformed_csv_file ) in record.message
        for record in caplog.records )
@require_pil
def test_csv_cast_image (csv_file_with_image ):
    with open(csv_file_with_image , encoding='utf-8' ) as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding='utf-8' , features=Features({'image': Image()} ) )
    generator = csv._generate_tables([[csv_file_with_image]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('image' ).type == Image()()
    generated_content = pa_table.to_pydict()['image']
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label (csv_file_with_label ):
    with open(csv_file_with_label , encoding='utf-8' ) as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding='utf-8' , features=Features({'label': ClassLabel(names=['good', 'bad'] )} ) )
    generator = csv._generate_tables([[csv_file_with_label]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa_table.schema.field('label' ).type == ClassLabel(names=['good', 'bad'] )()
    generated_content = pa_table.to_pydict()['label']
    assert generated_content == [ClassLabel(names=['good', 'bad'] ).straint(label ) for label in labels]
def test_csv_convert_int_list (csv_file_with_int_list ):
    csv = Csv(encoding='utf-8' , sep=',' , converters={'int_list': lambda x : [int(i ) for i in x.split()]} )
    generator = csv._generate_tables([[csv_file_with_int_list]] )
    pa_table = pa.concat_tables([table for _, table in generator] )
    assert pa.types.is_list(pa_table.schema.field('int_list' ).type )
    generated_content = pa_table.to_pydict()['int_list']
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_bigcode'] = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTBigCodeForSequenceClassification',
        'GPTBigCodeForTokenClassification',
        'GPTBigCodeForCausalLM',
        'GPTBigCodeModel',
        'GPTBigCodePreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
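    # Illustrative sketch (not part of the original file): the _LazyModule
    # pattern above defers the heavy torch imports until an attribute is first
    # accessed. A stripped-down stand-in using module-level __getattr__
    # (PEP 562); names are simplified for illustration:
    #
    #   import importlib
    #
    #   def __getattr__(name):
    #       for module_name, names in _import_structure.items():
    #           if name in names:
    #               module = importlib.import_module("." + module_name, __name__)
    #               return getattr(module, name)
    #       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")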
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    image_column_name: Optional[str] = field(
        default=None , metadata={"help": "The column name of the images in the files."} )
    train_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the training data."} )
    validation_dir: Optional[str] = field(default=None , metadata={"help": "A folder containing the validation data."} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        } , )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    image_processor_name: Optional[str] = field(default=None , metadata={"help": "Name or path of preprocessor config."} )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    mask_ratio: float = field(
        default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
    norm_pix_loss: bool = field(
        default=True , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class CustomTrainingArguments(TrainingArguments ):
    base_learning_rate: float = field(
        default=1e-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
def collate_fn (examples ):
    pixel_values = torch.stack([example['pixel_values'] for example in examples] )
    return {"pixel_values": pixel_values}
def lowerCamelCase ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name , **config_kwargs )
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"""New config: {config}""" )
    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        } )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        image_processor = ViTImageProcessor()
    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = ViTMAEForPreTraining(config )
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__a : Tuple = image_processor.size['shortest_edge']
else:
__a : Union[str, Any] = (image_processor.size['height'], image_processor.size['width'])
__a : Dict = Compose(
[
Lambda(lambda _SCREAMING_SNAKE_CASE : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_SCREAMING_SNAKE_CASE : List[Any] ):
__a : Optional[int] = [transforms(_SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]]
return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset' )
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset' )
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics('train' , train_result.metrics )
        trainer.save_metrics('train' , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def lowerCamelCase (index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
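# Hedged usage sketch (an editorial addition; the flags below are defined by this script's
# dataclasses or by TrainingArguments, but the values are illustrative):
#
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75 --norm_pix_loss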
| 27 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester :
    def __init__( self , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=36 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1000 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        '''simple docstring'''
        model = TFLayoutLMvaModel(config=config )
        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        result = model({'pixel_values': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        '''simple docstring'''
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        '''simple docstring'''
        return True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict['labels'] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict['start_positions'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict['end_positions'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict['labels'] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict['labels'] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
        return inputs_dict
def __UpperCAmelCase ( self ):
'''simple docstring'''
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , 'hf_compute_loss' , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )
                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='tf' ).pixel_values
        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 27 | 1 |
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size (input_image : np.ndarray , output_size : Union[int, Iterable[int]] , keep_aspect_ratio : bool , multiple : int ):
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height , input_width = get_image_size(input_image )
    output_height , output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
return (new_height, new_width)
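# Hedged sanity check (an editorial addition, not in the original module): with
# keep_aspect_ratio=True the rescaling factor closer to 1 wins, then both sides snap to the
# requested multiple. For a channels-first 480x640 input targeted at 384x384 with multiple=32,
# the height factor 384/480 = 0.8 is kept, so the width becomes 0.8 * 640 = 512:
#
#   get_resize_output_image_size(np.zeros((3, 480, 640)), (384, 384), True, 32)  # -> (384, 512)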
class DPTImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , keep_aspect_ratio = False , ensure_multiple_of = 1 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , keep_aspect_ratio = False , ensure_multiple_of = 1 , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(
            image , output_size=(size['height'], size['width']) , keep_aspect_ratio=keep_aspect_ratio , multiple=ensure_multiple_of , )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , keep_aspect_ratio = None , ensure_multiple_of = None , resample = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
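# Hedged usage sketch (an editorial addition; `processor` and `outputs` are assumed objects):
#
#   maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
#   maps[0].shape  # torch.Size([480, 640]) of integer class ids, one map per input image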
| 27 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path (test_file ):
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            F"""{test_file} instead.""" )
    test_fn = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
    components = components[:-1] + [test_fn.replace('.py' , '' )]
    test_module_path = '.'.join(components )
    return test_module_path
def get_test_module (test_file ):
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes (test_file ):
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes (test_file ):
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , 'all_model_classes' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes (test_file ):
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class (test_class ):
    test = test_class()
    if hasattr(test , 'setUp' ):
        test.setUp()
    model_tester = None
    if hasattr(test , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model (test_file , model_class ):
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model (test_file , model_class ):
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping (test_file ):
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping (test_file ):
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping (test_file ):
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json (o ):
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
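# Hedged usage sketch (an editorial addition, not in the original utility): `get_module_path`
# turns a repo-relative test path into an importable dotted module path, and `to_json` makes
# class-valued mappings JSON-friendly by swapping classes for their names.
if __name__ == "__main__":
    assert get_module_path(os.path.sep.join(['tests', 'models', 'bert', 'test_modeling_bert.py'] ) ) == 'tests.models.bert.test_modeling_bert'
    assert to_json({int: [str, 'x']} ) == {'int': ['str', 'x']}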
| 27 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
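# Hedged note (an editorial addition): MultiControlNetModel wraps several ControlNets so the
# pipelines above can condition on multiple control images at once, e.g.
#
#   controlnet = MultiControlNetModel([controlnet_canny, controlnet_pose])  # hypothetical instances
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(base_model_id, controlnet=controlnet)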
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    def get_dummy_components( self ):
'''simple docstring'''
torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        mask_image = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self ):
'''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id , safety_checker=None )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9E-3
def __UpperCAmelCase ( self ):
'''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
            '/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , safety_checker=None , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-inpaint/init_image.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
        model_id = 'stabilityai/stable-diffusion-2-inpainting'
        scheduler = PNDMScheduler.from_pretrained(model_id , subfolder='scheduler' )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id , safety_checker=None , scheduler=scheduler , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'Face of a yellow cat, high resolution, sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , generator=generator , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 27 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "AAPL" ):
__a : Union[str, Any] = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__a : List[str] = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
__a : Optional[Any] = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
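# Note (an editorial addition): scraping by CSS class is brittle; this only works while Yahoo
# keeps the exact 'My(6px) Pos(r) smartphone_Mt(6px)' markup, and `.find('div', ...)` returns
# None (raising AttributeError on the chained `.find('span')`) once the page layout changes.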
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 27 |
'''simple docstring'''
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Chicago" , _SCREAMING_SNAKE_CASE : str = APPID ):
return requests.get(URL_BASE + 'weather' , params=locals() ).json()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "Kolkata, India" , _SCREAMING_SNAKE_CASE : str = APPID ):
return requests.get(URL_BASE + 'forecast' , params=locals() ).json()
def lowerCamelCase (_SCREAMING_SNAKE_CASE : float = 5_5.6_8 , _SCREAMING_SNAKE_CASE : float = 1_2.5_7 , _SCREAMING_SNAKE_CASE : str = APPID ):
return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
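# Hedged aside (an editorial addition): `params=locals()` works because at the point of the
# call the function's locals are exactly its keyword arguments, so current_weather('Paris')
# issues GET {URL_BASE}weather?q=Paris&appid=<APPID>.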
if __name__ == "__main__":
from pprint import pprint
while True:
__lowercase : Dict = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 27 | 1 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus" ):
__a : List[Any] = BeautifulSoup(requests.get(_SCREAMING_SNAKE_CASE ).text , 'html.parser' )
__a : Union[str, Any] = soup.findAll('h1' )
__a : int = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 27 |
'''simple docstring'''
import torch
from transformers import AutoModel
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self , __a="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(__a , self ).__init__()
__a : Tuple = AutoModel.from_pretrained(__a , return_dict=__a )
__a : int = torch.nn.CosineSimilarity(3 , 1E-0_8 )
__a : Union[str, Any] = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self , **__a ):
'''simple docstring'''
return self.bert(**__a ).last_hidden_state
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=__a )
def __UpperCAmelCase ( self , __a , __a , __a=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(__a , __a ) )
    def forward( self , W_query , W_supports ):
        '''simple docstring'''
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
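# Hedged usage sketch (an editorial addition; the shapes and dict keys follow the forward
# pass above, everything else is an assumption):
#
#   model = <the nn.Module above>()
#   # W_query: tokenizer output for the query sentences; W_supports: tokenizer output for the
#   # support sentences plus 'sizes' (supports per query) and the entity marker token ids.
#   p_starts, p_ends = model(W_query, W_supports)  # per-token start/end probabilities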
| 27 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens ):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False )
        with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        tokens = tokenizer.tokenize('你好[SEP]你是谁' )
        self.assertListEqual(tokens , ['你', '好', '[SEP]', '你', '是', '谁'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens ) , [5, 6, 2, 5, 7, 8] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = RoCBertBasicTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = RoCBertBasicTokenizer(do_lower_case=__a , strip_accents=__a )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_wordpiece_tokenizer(self):
        '''simple docstring'''
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])
    def test_is_whitespace(self):
'''simple docstring'''
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
'''simple docstring'''
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
'''simple docstring'''
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ['Test', '\xad', 'test']], [['[UNK]'], [], ['[UNK]']])
    def test_offsets_with_special_characters(self):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, 'do_lower_case') else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'A'),
                        ((1, 2), ','),
                        ((3, 5), 'na'),
                        ((5, 6), '##ï'),
                        ((6, 8), '##ve'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'Allen'),
                        ((21, 23), '##NL'),
                        ((23, 24), '##P'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), 'a'),
                        ((1, 2), ','),
                        ((3, 8), 'naive'),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), 'allen'),
                        ((21, 23), '##nl'),
                        ((23, 24), '##p'),
                        ((25, 33), 'sentence'),
                        ((33, 34), '.'),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids']))
                self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'])
    def test_change_tokenize_chinese_chars(self):
        '''simple docstring'''
        list_of_commun_chinese_char = ['的', '人', '有']
        text_with_chinese_char = ''.join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                kwargs['tokenize_chinese_chars'] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs['tokenize_chinese_chars'] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode('你好', add_special_tokens=False)
        text_2 = tokenizer.encode('你是谁', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                string_sequence = '你好,你是谁'
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
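
if __name__ == "__main__":
    # Hedged usage sketch added for illustration, not part of the original test
    # suite. It assumes network access and the public "weiweishi/roc-bert-base-zh"
    # checkpoint, and that the encoding exposes the three parallel id sequences the
    # tests above exercise (key names are assumptions).
    tok = RoCBertTokenizer.from_pretrained('weiweishi/roc-bert-base-zh')
    enc = tok('你好')
    print(enc['input_ids'], enc['input_shape_ids'], enc['input_pronunciation_ids'])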
| 27 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    # return x + y + z as a reduced fraction (top, bottom)
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num, den)
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
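
if __name__ == "__main__":
    # Worked example added for illustration (not in the original solution file):
    # with x = 1/2 and y = 1/3, the n=1 branch yields z = 5/6, and the triple sums
    # to 1/2 + 1/3 + 5/6 = 5/3, which add_three returns in lowest terms.
    assert add_three(1, 2, 1, 3, 5, 6) == (5, 3)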
| 27 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_channels=3 , patch_size=16 , image_size=288 , initializer_factor=1 , layer_norm_eps=1E-0_5 , stop_gradient=False , share_layernorm=True , remove_last_layer=False , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == "bridgetower":
            # extract the vision sub-config when loading from a full BridgeTower config
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__( self , vocab_size=5_0265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , initializer_factor=1 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-0_5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        '''simple docstring'''
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get('model_type') == "bridgetower":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)
class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__( self , share_cross_modal_transformer_layers=True , hidden_act="gelu" , hidden_size=768 , initializer_factor=1 , layer_norm_eps=1E-0_5 , share_link_tower_layers=False , link_tower_type="add" , num_attention_heads=12 , num_hidden_layers=6 , tie_word_embeddings=False , init_layernorm_from_vision_encoder=False , text_config=None , vision_config=None , **kwargs , ):
        '''simple docstring'''
        # the *_config_dict kwargs are legacy aliases and are simply discarded
        _ = kwargs.pop('text_config_dict', None)
        _ = kwargs.pop('vision_config_dict', None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.')
        if vision_config is None:
            vision_config = {}
            logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.')
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs( cls , text_config , vision_config , **kwargs ):
        '''simple docstring'''
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
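
if __name__ == "__main__":
    # Usage sketch added for illustration; it only exercises the classes defined
    # above (the small sizes are arbitrary) and makes no claims about checkpoints.
    text_cfg = BridgeTowerTextConfig(vocab_size=1000, hidden_size=64)
    vision_cfg = BridgeTowerVisionConfig(hidden_size=64, image_size=32)
    cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
    print(cfg.to_dict()['model_type'])  # "bridgetower"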
| 27 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider( self ):
        '''simple docstring'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options( self ):
        '''simple docstring'''
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy_pndm( self ):
        '''simple docstring'''
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png')
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png')
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy')
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1E-2
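
# Hedged note added for illustration: nightly-marked diffusers tests like the one
# above are usually skipped unless explicitly enabled; a typical invocation might
# look like the following (the env-var gate is an assumption of the test setup):
#
#   RUN_NIGHTLY=1 python -m pytest -k "inpaint_legacy" path/to/this_test_file.py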
| 27 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 42
            vocab_dict['<s>'] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
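
# Hedged invocation example added for illustration; the script filename and all
# paths below are placeholders, not values from the original script:
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --pytorch_dump_folder_path ./unispeech-converted \
#       --dict_path /path/to/dict.ltr.txt
#
# Pass --not_finetuned (and omit --dict_path) to convert a pretraining-only checkpoint.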
| 27 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    # convert bytes to megabytes
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(b2mb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)))
        train_total_peak_memory[f"""epoch-{epoch}"""] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--peak_memory_upper_bound' , type=float , default=None , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
    parser.add_argument(
        '--n_train' , type=int , default=320 , help='Number of training examples to use.' , )
    parser.add_argument(
        '--n_val' , type=int , default=160 , help='Number of validation examples to use.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=1 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
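
# Hedged invocation example added for illustration; the script filename and the
# flag values below are placeholders, not part of the original example:
#
#   accelerate launch peak_memory_tracking.py --model_name_or_path bert-base-cased \
#       --n_train 320 --n_val 160 --peak_memory_upper_bound 1500
#
# The script writes peak_memory_utilization.json into --output_dir on the main process.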
| 27 | 1 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, 'dpr_tokenizer')
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        # BART tok
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}
        bart_tokenizer_path = os.path.join(self.tmpdirname, 'bart_tokenizer')
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))
    def get_dpr_tokenizer(self):
        '''simple docstring'''
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    def get_dpr_ctx_encoder_tokenizer(self):
        '''simple docstring'''
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'dpr_tokenizer'))

    def get_bart_tokenizer(self):
        '''simple docstring'''
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, 'bart_tokenizer'))

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        '''simple docstring'''
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            })
        dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        '''simple docstring'''
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        '''simple docstring'''
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='custom' , )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, 'dataset')
            config.index_path = os.path.join(self.tmpdirname, 'index.faiss')
            dataset.get_index('embeddings').save(os.path.join(self.tmpdirname, 'index.faiss'))
            dataset.drop_index('embeddings')
            dataset.save_to_disk(os.path.join(self.tmpdirname, 'dataset'))
            del dataset
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            retriever = RagRetriever(
                config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size, dataset) , )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        '''simple docstring'''
        dataset = Dataset.from_dict(
            {
                'id': ['0', '1'],
                'text': ['foo', 'bar'],
                'title': ['Foo', 'Bar'],
                'embeddings': [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            })
        dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT)
        index_file_name = os.path.join(self.tmpdirname, 'hf_bert_base.hnswSQ8_correct_phi_128.c_index')
        dataset.save_faiss_index('embeddings', index_file_name + '.index.dpr')
        pickle.dump(dataset['id'], open(index_file_name + '.index_meta.dpr', 'wb'))
        passages_file = os.path.join(self.tmpdirname, 'psgs_w100.tsv.pkl')
        passages = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
        pickle.dump(passages, open(passages_file, 'wb'))
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='legacy' , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer())
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']), n_docs)
        self.assertEqual(doc_dicts[0]['id'][0], '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0], '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        '''simple docstring'''
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('transformers.models.rag.retrieval_rag.load_dataset') as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']), n_docs)
        self.assertEqual(doc_dicts[0]['id'][0], '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0], '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        '''simple docstring'''
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['embeddings', 'id', 'text', 'title'])
        self.assertEqual(len(doc_dicts[0]['id']), n_docs)
        self.assertEqual(doc_dicts[0]['id'][0], '1')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0], '0')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        '''simple docstring'''
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        '''simple docstring'''
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ['text', 'title'])
        self.assertEqual(len(doc_dicts[0]['text']), n_docs)
        self.assertEqual(doc_dicts[0]['text'][0], 'bar')  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['text'][0], 'foo')  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        '''simple docstring'''
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        '''simple docstring'''
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors='pt', )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        '''simple docstring'''
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32)
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        self.assertEqual(
            len(out), 6)  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask')), True)  # check for doc token related keys in dictionary.
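
if __name__ == "__main__":
    # Minimal standalone sketch (added for illustration) of the inner-product
    # retrieval the tests above rely on; it uses faiss directly rather than
    # RagRetriever and assumes faiss is installed.
    index = faiss.IndexFlatIP(8)
    index.add(np.stack([np.ones(8, dtype='float32'), 2 * np.ones(8, dtype='float32')]))
    scores, ids = index.search(np.ones((1, 8), dtype='float32'), 1)
    print(ids)  # inner product favors the 2*ones vector -> [[1]]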
| 27 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)

import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer


MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        s2s_model.load_state_dict(save_dict['model'])
        _ = s2s_model.eval()
    else:
        s2s_tokenizer, s2s_model = make_qa_s2s_model(
            model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0')
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0')['train']
        wiki40b_passage_reps = np.memmap(
            'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wiki40b_passages.num_rows, 128) , )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset('eli5' , name='LFQA_reddit')
    eli5_train = eli5['train_eli5']
    eli5_train_q_reps = np.memmap(
        'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(eli5_train.num_rows, 128))
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)


passages, gpu_dense_index, es_client = load_indexes()
qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()


def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results)
        else:
            support_doc, hit_lst = query_es_index(
                question , es_client , index_name='english_wiki40b_snippets_100w' , n_results=n_results , )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list
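
# Illustrative note (added): make_support feeds the reader model a single string,
# e.g. 'question: How do people make chocolate? context: first passage <P> second
# passage ...', so retrieval quality directly bounds the generated answer.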
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda x: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda x: None),
    })
def answer_question(
    question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc , s2s_model , s2s_tokenizer , num_answers=1 , num_beams=n_beams , min_len=min_len , max_len=max_len , do_sample=sampling , temp=temp , top_p=top_p , top_k=None , max_input_length=1_024 , device='cuda:0' , )[0]
    return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : Dict = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
    wiki_source = 'wiki40b'
    index_type = 'dense'
sampled = 'beam'
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox('Generation options')
if generate_options:
    generate_info = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
    min_len = st.sidebar.slider(
        'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
if sampled == "beam":
        n_beams = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
        top_p = st.sidebar.slider(
            'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
            question_doc, support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
            support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 27 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        # setting an attribute on one instance must not leak into a fresh instance
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
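# Illustrative behavior of the "gelu_10" variant exercised above (a sketch,
# not part of the original test file): it matches gelu below the clip point
# and saturates at 10.0 above it, e.g.
#   get_activation('gelu_10')(torch.tensor([100.0]))  # -> tensor([10.])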
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])

    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])

    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])

    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
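# Illustrative sketch (hypothetical checkpoint keys) of what the renaming
# above produces:
#   "network.0.1.pwconv1.weight"
#     -> "swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight"
#   "patch_embed.0.weight"
#     -> "swiftformer.patch_embed.patch_embedding.0.weight"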
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location='cpu', check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location='cpu')
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config')
    inputs = processor(images=image, return_tensors='pt')

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs['pixel_values']).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""")
    hf_model.save_pretrained(pytorch_dump_folder_path)
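# Example invocation (the checkpoint location is hypothetical):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth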
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 27 | 1 |
'''simple docstring'''
def merge_sort(collection):
    """Sort by repeatedly extracting the current minimum and maximum.

    Despite the name used by the caller below, this is a min-max selection
    scheme rather than a classic merge sort.
    """
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
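# Quick sanity checks for the function above (illustrative only):
#   merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
#   merge_sort([]) == []
#   merge_sort([2, 2, 1]) == [1, 2, 2]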
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 27 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\''
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
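# Minimal usage sketch (assumes only the code above): the derived properties
# map onto the T5-style attribute names.
#   config = UMT5Config()
#   config.hidden_size          # == config.d_model == 512
#   config.num_attention_heads  # == config.num_heads == 6
#   config.dense_act_fn         # 'gelu_new' for the default 'gated-gelu'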
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm'] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm'] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
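# With the structure above, `import transformers.models.xlm` stays cheap:
# _LazyModule defers the heavy torch/TF imports until an attribute such as
# XLMModel is first accessed (a sketch of the intended behavior, driven by
# the _import_structure mapping registered here).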
| 27 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dictionary of current worldwide COVID-19 statistics."""
    soup = BeautifulSoup(requests.get(url).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
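# The returned mapping looks roughly like the following (the values change
# constantly, so these numbers are purely illustrative):
#   {'Coronavirus Cases:': '704,753,890', 'Deaths:': '7,010,681', ...}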
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 27 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""Wrapper for multiple `ControlNetModel`s: runs each net and sums their residuals."""

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory,
        is_main_process=True,
        save_function=None,
        safe_serialization=False,
        variant=None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""

        logger.info(f"""{len(controlnets)} controlnets loaded from {pretrained_model_path}.""")

        if len(controlnets) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}."""
            )

        return cls(controlnets)
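# Illustrative usage (the directories are hypothetical): wrap two ControlNets
# so their residuals are summed in a single denoising forward pass.
#   nets = MultiControlNetModel([
#       ControlNetModel.from_pretrained('my-dir/controlnet'),
#       ControlNetModel.from_pretrained('my-dir/controlnet_1'),
#   ])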
| 27 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'embed_dim'))
        self.parent.assertTrue(hasattr(config, 'num_heads'))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='Cvt does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0, reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0, reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8')
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy('mixed_float16')
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('float32')

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1E-4))
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
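# Illustrative sketch: given a fairseq `dict.txt` whose lines look like
# "hello 42" and "world 7", the function above yields
#   {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'hello': 4, 'world': 5}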
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
    parser.add_argument('--vocab_size', default=10_224, type=int, help='Vocab size of decoder')
    parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')

    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
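# Example invocation (all paths are hypothetical):
#   python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./s2t-wav2vec2-converted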
| 27 | 1 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: returns the longest palindromic substring in O(n).

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ''
    output_string = ''

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"

    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
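# A brute-force reference for spot-checking the linear-time version above
# (illustrative helper, not part of the original module):
def _longest_palindrome_naive(s: str) -> str:
    best = ""
    for i in range(len(s)):
        for j in range(i, len(s)):
            sub = s[i : j + 1]
            # keep the longest substring that reads the same both ways
            if sub == sub[::-1] and len(sub) > len(best):
                best = sub
    return best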
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 |
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()

gpu = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 27 | 1 |
'''simple docstring'''
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks))
def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
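# Illustrative usage of the helpers above: split 12 attention blocks evenly
# across 4 devices, then validate the resulting map.
#   device_map = get_device_map(12, list(range(4)))
#   # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
#   assert_device_map(device_map, 12)  # raises ValueError for bad maps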
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 1 |