| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 – 54.5k) | int64 (0 – 371) | string (lengths 87 – 49.2k) | int64 (0 – 349) | int64 (0 – 1) |
"""simple docstring"""
UpperCAmelCase__ : Tuple = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the default log level, overridable with the DATASETS_VERBOSITY env var."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current verbosity level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm that does nothing, used when progress bars are disabled."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any requested attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
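# Usage sketch (an assumption about how this module is consumed; the import
# path below is hypothetical and simply matches the de-obfuscated names above):
#
#     from datasets.utils import logging as ds_logging
#
#     ds_logging.set_verbosity_debug()          # or: export DATASETS_VERBOSITY=debug
#     logger = ds_logging.get_logger(__name__)
#     logger.debug("now visible")
#     ds_logging.disable_progress_bar()         # tqdm(...) above now returns EmptyTqdm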
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operators, without `+`."""
    while second != 0:
        carry = first & second  # bits set in both numbers produce a carry
        first ^= second  # sum of the bits, ignoring the carry
        second = carry << 1  # the carry moves one position to the left
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
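# Worked example (added for illustration): add(5, 3)
#   iteration 1: carry = 0b101 & 0b011 = 0b001, first = 0b101 ^ 0b011 = 0b110, second = 0b010
#   iteration 2: carry = 0b110 & 0b010 = 0b010, first = 0b110 ^ 0b010 = 0b100, second = 0b100
#   iteration 3: carry = 0b100 & 0b100 = 0b100, first = 0b100 ^ 0b100 = 0b000, second = 0b1000
#   iteration 4: carry = 0, first = 0b1000 = 8, second = 0  ->  loop ends, returns 8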
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size list."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        """Return the front element without removing it, or False if empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")

        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")

        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
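# Small self-check (not part of the original snippet):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue("a").enqueue("b")
    assert len(queue) == 2 and queue.first() == "a"
    assert queue.dequeue() == "a"
    queue.enqueue("c").enqueue("d")  # the rear wraps around the fixed-size array
    assert queue.dequeue() == "b"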
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "megatron-bert"
def __init__( self , _UpperCAmelCase=29056 , _UpperCAmelCase=1024 , _UpperCAmelCase=24 , _UpperCAmelCase=16 , _UpperCAmelCase=4096 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , **_UpperCAmelCase , ):
super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase )
lowercase__: str = vocab_size
lowercase__: Tuple = hidden_size
lowercase__: List[str] = num_hidden_layers
lowercase__: Dict = num_attention_heads
lowercase__: Union[str, Any] = hidden_act
lowercase__: Optional[Any] = intermediate_size
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: str = attention_probs_dropout_prob
lowercase__: List[Any] = max_position_embeddings
lowercase__: List[str] = type_vocab_size
lowercase__: Union[str, Any] = initializer_range
lowercase__: str = layer_norm_eps
lowercase__: int = position_embedding_type
lowercase__: Optional[int] = use_cache
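# Example (hedged; assumes this file ships inside transformers like any other
# model configuration):
#
#     from transformers import MegatronBertConfig
#
#     config = MegatronBertConfig(num_hidden_layers=12)
#     print(config.model_type)    # "megatron-bert"
#     print(config.hidden_size)   # 1024, the default above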
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : Any = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase : Dict = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        # Rebuild the pre-tokenizer if the stored `add_prefix_space` does not match.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Build model inputs for a conversation by concatenating the encoded turns."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
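# Example (standard transformers usage, shown here for illustration):
#
#     from transformers import GPT2TokenizerFast
#
#     tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
#     ids = tokenizer("hello world")["input_ids"]
#     print(tokenizer.decode(ids))  # "hello world"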
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor for EnCodec: pads or truncates raw audio into fixed-stride chunks."""

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
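# Example (hedged; mirrors standard transformers usage, the checkpoint name is
# illustrative):
#
#     import numpy as np
#     from transformers import EncodecFeatureExtractor
#
#     extractor = EncodecFeatureExtractor.from_pretrained("facebook/encodec_24khz")
#     audio = np.zeros(24000, dtype=np.float32)  # one second of mono silence
#     inputs = extractor(raw_audio=audio, sampling_rate=24000, return_tensors="pt")
#     print(inputs["input_values"].shape)  # (batch, channels, num_samples)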
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """
    Get the writer_batch_size that defines the maximum row group size in the parquet files.
    The default in `datasets` is 1,000 but it is lowered for image/audio/binary datasets so
    that accessing one row does not require reading an entire, very large row group.
    Returns `None` to fall back to the `datasets` default.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Write the dataset to `file_obj` in batches of `batch_size` rows; returns bytes written."""
        written = 0
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
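# Usage sketch (hedged; the file name is illustrative):
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ParquetDatasetWriter(ds, "data.parquet").write()                   # Dataset -> Parquet
#     ds2 = ParquetDatasetReader("data.parquet", split="train").read()   # Parquet -> Dataset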
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
'''simple docstring'''
_enforce_args(_UpperCAmelCase , _UpperCAmelCase )
if n == 0:
return 0
lowercase : Tuple = float('-inf' )
for i in range(1 , n + 1 ):
lowercase : Union[str, Any] = max(
_UpperCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , _UpperCAmelCase ) )
return max_revue
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
_enforce_args(_UpperCAmelCase , _UpperCAmelCase )
lowercase : Any = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Dict:
'''simple docstring'''
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
lowercase : Optional[int] = float('-inf' )
for i in range(1 , n + 1 ):
lowercase : Dict = max(
_UpperCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , _UpperCAmelCase , _UpperCAmelCase ) , )
lowercase : int = max_revenue
return max_rev[n]
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
'''simple docstring'''
_enforce_args(_UpperCAmelCase , _UpperCAmelCase )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
lowercase : int = [float('-inf' ) for _ in range(n + 1 )]
lowercase : Union[str, Any] = 0
for i in range(1 , n + 1 ):
lowercase : Any = max_rev[i]
for j in range(1 , i + 1 ):
lowercase : Optional[int] = max(_UpperCAmelCase , prices[j - 1] + max_rev[i - j] )
lowercase : Optional[Any] = max_revenue_i
return max_rev[n]
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if n < 0:
lowercase : Tuple = f'''n must be greater than or equal to 0. Got n = {n}'''
raise ValueError(_UpperCAmelCase )
if n > len(_UpperCAmelCase ):
lowercase : Dict = (
'Each integral piece of rod must have a corresponding price. '
f'''Got n = {n} but length of prices = {len(_UpperCAmelCase )}'''
)
raise ValueError(_UpperCAmelCase )
def lowercase__ ( ) -> str:
'''simple docstring'''
lowercase : Optional[Any] = [6, 10, 12, 15, 20, 23]
lowercase : Optional[Any] = len(_UpperCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
lowercase : List[Any] = 36
lowercase : Union[str, Any] = top_down_cut_rod(_UpperCAmelCase , _UpperCAmelCase )
lowercase : Optional[Any] = bottom_up_cut_rod(_UpperCAmelCase , _UpperCAmelCase )
lowercase : List[str] = naive_cut_rod_recursive(_UpperCAmelCase , _UpperCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
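# Worked example for prices = [6, 10, 12, 15, 20, 23] (illustration only):
#   max_rev[1] = 6    one piece of length 1
#   max_rev[2] = 12   two length-1 pieces beat one length-2 piece (6 + 6 > 10)
#   max_rev[3] = 18   6 + max_rev[2] = 18 beats 10 + 6 = 16 and 12
#   ...
#   max_rev[6] = 36   six length-1 pieces: 6 * 6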
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
def compute_ap(l):  # noqa: E741
    """Find and print the articulation points of an undirected graph given as adjacency list `l`."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has more than one child
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
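# For the sample graph above this prints 2, 3 and 5: removing vertex 2 splits the
# graph into {0, 1}, {3, 4} and {5, 6, 7, 8}; removing vertex 3 isolates vertex 4;
# removing vertex 5 cuts the cycle {6, 7, 8} off from the rest.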
from random import shuffle

import tensorflow as tf
from numpy import array


def TFKMeansCluster(vectors, noofclusters):
    """K-Means clustering of `vectors` (an n x k array) into `noofclusters` clusters."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
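# Usage sketch (an assumption; the code above targets the legacy TensorFlow 1.x
# graph/session API and will not run on TF 2.x without tf.compat.v1):
#
#     import numpy as np
#
#     data = np.random.rand(50, 2)  # 50 two-dimensional points
#     centroids, assignments = TFKMeansCluster(data, 3)
#     print(centroids)          # the 3 cluster centers
#     print(assignments[:10])   # cluster index assigned to each point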
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student with e encoder layers and d decoder layers, copying weights from the teacher."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)

    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
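# Usage sketch (hedged; the model name, output path and layer counts are
# illustrative):
#
#     python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3
#
# or directly from Python:
#
#     student, copied_enc, copied_dec = create_student_by_copying_alternating_layers(
#         "facebook/bart-large-cnn", save_path="student_dir", e=12, d=3
#     )
#     print(copied_dec)  # [0, 6, 11] per LAYERS_TO_COPY: first, middle and last layer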
import numpy as np
from nltk.translate import meteor_score

import datasets
from datasets.config import importlib_metadata, version


NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
"""Project Euler: maximum path sum from top to bottom of the triangle in triangle.txt."""

import os


def solution():
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    # Each cell accumulates the best sum reachable from the row above it.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
"""simple docstring"""
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'{price_plus_tax(100, 0.25) = }')
print(F'{price_plus_tax(1_25.50, 0.05) = }')
"""simple docstring"""
def A ( snake_case__ ):
'''simple docstring'''
assert isinstance(snake_case__ , snake_case__ ), f"""The input value of [n={number}] is not an integer"""
if number == 1:
return 2
elif number < 1:
SCREAMING_SNAKE_CASE__ = f"""The input value of [n={number}] has to be > 0"""
raise ValueError(snake_case__ )
else:
SCREAMING_SNAKE_CASE__ = sylvester(number - 1 )
SCREAMING_SNAKE_CASE__ = num - 1
SCREAMING_SNAKE_CASE__ = num
return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
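# First terms, for reference: sylvester(1) = 2, sylvester(2) = 3, sylvester(3) = 7,
# sylvester(4) = 43, sylvester(5) = 1807; each term equals the product of all
# previous terms, plus one.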
from __future__ import annotations


class XORCipher:
    def __init__(self, key: int = 0):
        """Simple XOR cipher; falls back to the constructor key (or 1) when no key is given."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """Encrypt `content` with `key`, returning the result as a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        """Decrypt `content` with `key` (XOR is its own inverse), as a list of chars."""
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """simple docstring"""
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """simple docstring"""
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE checkpoint to.")

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
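# Hedged CLI sketch (the file names are assumptions, not from the original script):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae_diffusers
# The dump directory can then be reloaded with AutoencoderKL.from_pretrained("./vae_diffusers").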
| 204 | 1 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowercase_ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowercase_ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowercase_ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    """simple docstring"""

    def _info(self):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'],
            reference_urls=[
                'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
                'https://en.wikipedia.org/wiki/METEOR',
            ],
        )

    def _download_and_prepare(self, dl_manager):
        '''simple docstring'''
        import nltk

        nltk.download('wordnet')
        if NLTK_VERSION >= version.Version('3.6.5'):
            nltk.download('punkt')
        if NLTK_VERSION >= version.Version('3.6.6'):
            nltk.download('omw-1.4')

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        '''simple docstring'''
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 7 |
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 223 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]
def __init__( self : Union[str, Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCamelCase__ : List[str] , ) -> None:
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
snake_case : Optional[Any] = size if size is not None else {'''shortest_edge''': 224}
snake_case : Optional[Any] = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
snake_case : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
snake_case : str = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
snake_case : Optional[int] = do_resize
snake_case : List[Any] = size
snake_case : Any = resample
snake_case : Optional[int] = do_center_crop
snake_case : int = crop_size
snake_case : int = do_rescale
snake_case : Optional[Any] = rescale_factor
snake_case : str = do_normalize
snake_case : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCAmelCase ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray:
"""simple docstring"""
snake_case : Union[str, Any] = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
snake_case : List[str] = int((256 / 224) * size['''shortest_edge'''] )
snake_case : Optional[Any] = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
snake_case : Optional[Any] = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
UpperCamelCase__ , size=(size_dict['''height'''], size_dict['''width''']) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray:
"""simple docstring"""
snake_case : int = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(UpperCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any , ) -> np.ndarray:
"""simple docstring"""
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase ( self : Any , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[TensorType] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Any , ) -> BatchFeature:
"""simple docstring"""
snake_case : Optional[Any] = do_resize if do_resize is not None else self.do_resize
snake_case : Optional[int] = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : List[str] = image_mean if image_mean is not None else self.image_mean
snake_case : Any = image_std if image_std is not None else self.image_std
snake_case : List[str] = size if size is not None else self.size
snake_case : Optional[int] = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
snake_case : List[str] = crop_size if crop_size is not None else self.crop_size
snake_case : Tuple = get_size_dict(UpperCamelCase__ , param_name='''crop_size''' )
snake_case : str = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
snake_case : int = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
snake_case : Optional[int] = [self.resize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_rescale:
snake_case : Dict = [self.rescale(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_normalize:
snake_case : int = [self.normalize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
snake_case : int = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
snake_case : str = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
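# Hedged usage sketch (the class name above is an assumption recovered from the
# 256/224 shortest-edge heuristic; inputs are illustrative):
# processor = LevitImageProcessor()                        # shortest_edge=224, center crop 224x224
# batch = processor(images=pil_image, return_tensors="pt")
# batch["pixel_values"].shape                              # -> torch.Size([1, 3, 224, 224])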
| 83 |
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    '''simple docstring'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
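# Hedged sanity check (not in the original): the binary search trims comparisons
# to O(n log n), but the element shifts keep the worst case at O(n^2) moves.
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]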
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 83 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    '''simple docstring'''
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    '''simple docstring'''
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
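# Hedged usage sketch (not from the original file; attribute values are illustrative):
# from types import SimpleNamespace
# args = SimpleNamespace(seed=42, n_gpu=0, local_rank=-1)
# set_seed(args)         # seeds numpy and torch
# init_gpu_params(args)  # CPU-only path: marks the process as master, skips distributed init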
| 343 | import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def generate_random_hand():
    '''simple docstring'''
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    '''simple docstring'''
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    '''simple docstring'''
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    '''simple docstring'''
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    '''simple docstring'''
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    '''simple docstring'''
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    '''simple docstring'''
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    '''simple docstring'''
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    '''simple docstring'''
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    '''simple docstring'''
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    '''simple docstring'''
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    '''simple docstring'''
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    '''simple docstring'''
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 343 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
_A = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        """simple docstring"""
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
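# Hedged migration sketch (not in the original file): the subclass only adds a
# FutureWarning, so call sites can swap it for Trainer unchanged, e.g.
# trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)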
| 370 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day():
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes():
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 167 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/reformer-crime-and-punishment''': 52_4288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], sp_model_kwargs=None, **kwargs) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
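# Hedged usage sketch (the checkpoint id comes from the pretrained map above):
# tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
# ids = tokenizer("A sentence to tokenize.").input_ids
# tokenizer.decode(ids)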
| 188 |
"""simple docstring"""
class PrefixSum:
    '''simple docstring'''

    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
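# Hedged usage sketch (not in the original): queries are O(1) after the O(n) setup.
# ps = PrefixSum([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
# ps.get_sum(1, 3)               # 2 + 3 + 4 = 9
# ps.contains_sum(6)             # True: the subarray [1, 2, 3] sums to 6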
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
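# Hedged usage sketch (defaults are the ones defined above):
# config = Swin2SRConfig(upscale=4)
# (config.embed_dim, config.num_layers)  # -> (180, 6)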
| 95 | """simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_a : int= False
class UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
__snake_case : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
__snake_case : List[Any] = torch.manual_seed(0)
__snake_case : Optional[int] = pipe.dual_guided(
prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_A)
        pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
__snake_case : Tuple = generator.manual_seed(0)
__snake_case : int = pipe.dual_guided(
prompt='first prompt' , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
__snake_case : Tuple = 'cyberpunk 2077'
__snake_case : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
__snake_case : str = torch.manual_seed(0)
__snake_case : Union[str, Any] = pipe.dual_guided(
prompt=_A , image=_A , text_to_image_strength=0.75 , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__snake_case : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : Tuple = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
__snake_case : List[str] = 'A painting of a squirrel eating a burger '
__snake_case : str = torch.manual_seed(0)
__snake_case : str = pipe.text_to_image(
prompt=_A , generator=_A , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy').images
__snake_case : str = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
__snake_case : List[str] = pipe.image_variation(_A , generator=_A , output_type='numpy').images
__snake_case : Dict = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__snake_case : List[Any] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
| 95 | 1 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def _lowerCamelCase ( self :Tuple ) -> Optional[Any]:
return 3_2
@property
def _lowerCamelCase ( self :List[str] ) -> List[str]:
return 3_2
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
return self.time_input_dim
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
return self.time_input_dim * 4
@property
def _lowerCamelCase ( self :Tuple ) -> str:
return 1_0_0
@property
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
__UpperCamelCase : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowerCamelCase ( self :str ) -> int:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(a )
@property
def _lowerCamelCase ( self :int ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase : Optional[int] = {
"num_attention_heads": 2,
"attention_head_dim": 1_2,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
__UpperCamelCase : List[Any] = PriorTransformer(**a )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , )
__UpperCamelCase : int = CLIPVisionModelWithProjection(a )
return model
@property
def _lowerCamelCase ( self :Dict ) -> Optional[Any]:
__UpperCamelCase : List[str] = CLIPImageProcessor(
crop_size=2_2_4 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_2_4 , )
return image_processor
def _lowerCamelCase ( self :str ) -> Optional[int]:
__UpperCamelCase : str = self.dummy_prior
__UpperCamelCase : int = self.dummy_image_encoder
__UpperCamelCase : Tuple = self.dummy_text_encoder
__UpperCamelCase : int = self.dummy_tokenizer
__UpperCamelCase : Optional[Any] = self.dummy_image_processor
__UpperCamelCase : int = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_0_0_0 , clip_sample=a , clip_sample_range=10.0 , )
__UpperCamelCase : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def _lowerCamelCase ( self :Optional[Any] , a :int , a :Union[str, Any]=0 ) -> Any:
if str(a ).startswith("mps" ):
__UpperCamelCase : int = torch.manual_seed(a )
else:
__UpperCamelCase : List[Any] = torch.Generator(device=a ).manual_seed(a )
__UpperCamelCase : int = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _lowerCamelCase ( self :List[Any] ) -> Dict:
__UpperCamelCase : int = "cpu"
__UpperCamelCase : List[str] = self.get_dummy_components()
__UpperCamelCase : List[str] = self.pipeline_class(**a )
__UpperCamelCase : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
__UpperCamelCase : int = pipe(**self.get_dummy_inputs(a ) )
__UpperCamelCase : int = output.image_embeds
__UpperCamelCase : Optional[int] = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
__UpperCamelCase : Union[str, Any] = image[0, -1_0:]
__UpperCamelCase : List[str] = image_from_tuple[0, -1_0:]
assert image.shape == (1, 3_2)
__UpperCamelCase : List[Any] = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self :int ) -> Union[str, Any]:
__UpperCamelCase : str = torch_device == "cpu"
__UpperCamelCase : List[str] = True
__UpperCamelCase : List[Any] = False
self._test_inference_batch_single_identical(
test_max_difference=a , relax_max_difference=a , test_mean_pixel_difference=a , )
@skip_mps
def _lowerCamelCase ( self :Any ) -> int:
__UpperCamelCase : Optional[Any] = torch_device == "cpu"
__UpperCamelCase : Dict = False
self._test_attention_slicing_forward_pass(
test_max_difference=a , test_mean_pixel_difference=a , ) | 232 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    '''simple docstring'''

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()

        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 232 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float')),
"references": datasets.Sequence(datasets.Value('float')),
}
else:
return {
"predictions": datasets.Value('float'),
"references": datasets.Value('float'),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
return {"mse": mse}
| 216 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
snake_case_ = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
snake_case_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 216 | 1 |
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version: str):
    """simple docstring"""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
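    # Hedged CLI sketch (the version number is illustrative): running
    # `python update_custom_js.py --version 4.28.0` rewrites stableVersion and
    # appends "v4.28.0" to versionMapping in custom.js.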
update_custom_js(args.version) | 6 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[int] = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 6 | 1 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all the examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work() | 362 |
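# Typical invocations of the release script above (run from the repository root;
# the utils/release.py path is an assumption about where this file lives):
#
#   python utils/release.py                 # pre-release: bump version, clean README links
#   python utils/release.py --patch         # pre-release for a patch version
#   python utils/release.py --post_release  # move back to a .dev0 version after the release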
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250


def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-format .txt labels from label_dir; return image paths and box lists."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            # Convert (x_center, y_center, w, h) to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Stitch four images into one mosaic and rescale their annotations accordingly."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Generate a random lowercase-alphanumeric string of length `number_char`."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅") | 317 | 0 |
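# Hypothetical setup for the mosaic script above: point the constants at a
# YOLO-format dataset (one .txt label file per .jpg image) before calling main().
import mosaic_augmentation  # assumed module name for the file above

mosaic_augmentation.LABEL_DIR = "data/labels"
mosaic_augmentation.IMG_DIR = "data/images"
mosaic_augmentation.OUTPUT_DIR = "data/mosaics"  # directory must already exist
mosaic_augmentation.main()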
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`. This pipeline predicts the class of a
    video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] | 86 |
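# Usage sketch for the pipeline above (assumptions: `decord` is installed and the
# real "MCG-NJU/videomae-base-finetuned-kinetics" checkpoint is used):
from transformers import pipeline

video_classifier = pipeline("video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics")
predictions = video_classifier("path/to/clip.mp4", top_k=3)
print(predictions)  # [{"score": ..., "label": ...}, ...]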
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum up to `number`,
    using dynamic programming over answers[0..number].
    >>> minimum_squares_to_represent_a_number(25)
    1
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 86 | 1 |
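# Worked check of the recurrence above: each answers[i] is 1 + the best count
# after removing one square j**2 <= i.
print(minimum_squares_to_represent_a_number(12))  # 3, since 12 = 4 + 4 + 4
print(minimum_squares_to_represent_a_number(13))  # 2, since 13 = 4 + 9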
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
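# These tests are gated behind the TEST_SAGEMAKER flag; a hypothetical invocation
# (assumes valid AWS credentials and a configured SageMaker execution role):
#
#   TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker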
| 19 |
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": {
                        "id": datasets.Value("string"),
                        "prediction_text": datasets.features.Sequence(datasets.Value("string")),
                    },
                    "references": {
                        "id": datasets.Value("string"),
                        "answers": datasets.features.Sequence(
                            {
                                "text": datasets.Value("string"),
                                "answer_start": datasets.Value("int32"),
                            }
                        ),
                    },
                }
            ),
            codebase_urls=["https://www.atticusprojectai.org/cuad"],
            reference_urls=["https://www.atticusprojectai.org/cuad"],
        )

    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 19 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map LDM VAE state-dict keys onto the diffusers AutoencoderKL layout."""
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to save the converted VAE.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
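# Hypothetical invocation of the conversion script above (the filename is an assumption):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers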
| 91 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    """CLAP feature extractor: turns raw audio into (optionally fused) log-mel spectrograms."""

    model_input_names = ["input_features", "is_longer"]

    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10, fft_window_size=1024, padding_value=0.0, return_attention_mask=False, frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None, truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size, min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney")

    def to_dict(self) -> Dict[str, Any]:
        # Serializes to a dict; the large, recomputable mel filter banks are dropped.
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(self, raw_speech, truncation: str = None, padding: Optional[str] = None, max_length: Optional[int] = None, sampling_rate: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
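# Usage sketch for the feature extractor above (assumes the real
# "laion/clap-htsat-unfused" checkpoint is reachable on the Hub):
import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
fake_audio = np.zeros(3 * 48_000, dtype=np.float64)  # 3 s of silence at 48 kHz
inputs = feature_extractor(fake_audio, sampling_rate=48_000, return_tensors="pt")
print(inputs["input_features"].shape)  # (1, 4, frames, 64) with the default "fusion" truncation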
| 285 | 0 |
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Breadth-first search: return True if t is reachable from s, filling `parent` along the way."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """
    Ford-Fulkerson with BFS (Edmonds-Karp): saturate augmenting paths, then
    report the edges that became saturated, i.e. the minimum cut.
    >>> mincut(test_graph, source=0, sink=5)
    [(1, 3), (4, 3), (4, 5)]
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 143 |
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Given two of the electron, hole, and intrinsic concentrations (pass the
    unknown one as 0), compute the third via the mass-action law n * p = n_i**2.
    >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
    ('intrinsic_conc', 50.0)
    >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
    ('electron_conc', 25.0)
    """
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 | 1 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalize the first letter of a sentence or word.
    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map each lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 20 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, using any model with a language modeling head."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
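# Usage sketch for the fill-mask pipeline above (assumes the real "bert-base-uncased" checkpoint):
from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")
for pred in unmasker("Paris is the [MASK] of France.", top_k=2):
    print(pred["token_str"], round(pred["score"], 3))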
| 20 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 byte values to printable unicode characters,
    used by the reversible byte-level BPE below.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.bart.tokenization_bart.get_pairs
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_="replace" ,lowerCamelCase_="<s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="<s>" ,lowerCamelCase_="<unk>" ,lowerCamelCase_="<pad>" ,lowerCamelCase_="<mask>" ,lowerCamelCase_=False ,**lowerCamelCase_ ,) -> List[Any]:
A = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else bos_token
A = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else eos_token
A = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else sep_token
A = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else cls_token
A = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else unk_token
A = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(lowerCamelCase_ ,lstrip=lowerCamelCase_ ,rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ) else mask_token
super().__init__(
errors=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,add_prefix_space=lowerCamelCase_ ,**lowerCamelCase_ ,)
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as vocab_handle:
A = json.load(lowerCamelCase_ )
A = {v: k for k, v in self.encoder.items()}
A = errors # how to handle errors in decoding
A = bytes_to_unicode()
A = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase_ ,encoding="""utf-8""" ) as merges_handle:
A = merges_handle.read().split("""\n""" )[1:-1]
A = [tuple(merge.split() ) for merge in bpe_merges]
A = dict(zip(lowerCamelCase_ ,range(len(lowerCamelCase_ ) ) ) )
A = {}
A = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCamelCase__ ( self ) -> Tuple:
return len(self.encoder )
def UpperCamelCase__ ( self ) -> Dict:
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Union[str, Any]:
if token in self.cache:
return self.cache[token]
A = tuple(lowerCamelCase_ )
A = get_pairs(lowerCamelCase_ )
if not pairs:
return token
while True:
A = min(lowerCamelCase_ ,key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(lowerCamelCase_ ):
try:
A = word.index(lowerCamelCase_ ,lowerCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(lowerCamelCase_ )
A = new_word
if len(lowerCamelCase_ ) == 1:
break
else:
A = get_pairs(lowerCamelCase_ )
A = """ """.join(lowerCamelCase_ )
A = word
return word
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Dict:
A = []
for token in re.findall(self.pat ,lowerCamelCase_ ):
A = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> List[str]:
return self.encoder.get(lowerCamelCase_ ,self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Union[str, Any]:
return self.decoder.get(lowerCamelCase_ )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Dict:
A = """""".join(lowerCamelCase_ )
A = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
    def UpperCamelCase__ ( self ,save_directory ,filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file ,"""w""" ,encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def UpperCamelCase__ ( self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def UpperCamelCase__ ( self ,token_ids_0 ,token_ids_1 = None ,already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def UpperCamelCase__ ( self ,token_ids_0 ,token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def UpperCamelCase__ ( self ,text ,is_split_into_words=False ,**kwargs ) -> Any:
        add_prefix_space = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
    def UpperCamelCase__ ( self ,encoded_inputs ,max_length = None ,padding_strategy = PaddingStrategy.DO_NOT_PAD ,pad_to_multiple_of = None ,return_attention_mask = None ,) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs ,max_length=max_length ,padding_strategy=padding_strategy ,pad_to_multiple_of=pad_to_multiple_of ,return_attention_mask=return_attention_mask ,)
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = """attention_mask""" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["""global_attention_mask"""] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["""global_attention_mask"""] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["""global_attention_mask"""] = (
                        encoded_inputs["""global_attention_mask"""] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["""global_attention_mask"""] = [-1] * difference + encoded_inputs[
                        """global_attention_mask"""
                    ]
                else:
                    raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
        return encoded_inputs
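# --- Example (added): a minimal, self-contained sketch of the `global_attention_mask`
# padding rule implemented in `_pad` above. The helper name and inputs are hypothetical,
# chosen only to illustrate the behaviour outside the tokenizer class.
def pad_global_attention_mask(global_attention_mask, target_length, padding_side="right"):
    difference = target_length - len(global_attention_mask)
    if difference <= 0:
        return global_attention_mask
    # -1 marks padded positions; 0 already means "local attention", so it cannot double as padding.
    if padding_side == "right":
        return global_attention_mask + [-1] * difference
    return [-1] * difference + global_attention_mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, padding_side="left") == [-1, -1, 1, 0, 0]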
| 363 |
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class lowerCamelCase__ ( ProcessorMixin ):
'''simple docstring'''
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''OwlViTImageProcessor'''
_lowerCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ) -> Tuple:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" ,FutureWarning ,)
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor ,tokenizer )
    def __call__( self ,text=None ,query_images=None ,images=None ,padding="max_length" ,return_tensors="np" ,**kwargs ) -> Optional[Any]:
        if text is None and query_images is None and images is None:
            raise ValueError(
                """You have to specify at least one text or query image or image. All three cannot be none.""" )
        if text is not None:
            if isinstance(text ,str ) or (isinstance(text ,List ) and not isinstance(text[0] ,List )):
                encodings = [self.tokenizer(text ,padding=padding ,return_tensors=return_tensors ,**kwargs )]
            elif isinstance(text ,List ) and isinstance(text[0] ,List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [""" """] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t ,padding=padding ,return_tensors=return_tensors ,**kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["""input_ids"""] for encoding in encodings] ,axis=0 )
                attention_mask = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] ,axis=0 )
                attention_mask = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["""input_ids"""] for encoding in encodings] ,dim=0 )
                attention_mask = torch.cat([encoding["""attention_mask"""] for encoding in encodings] ,dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["""input_ids"""] for encoding in encodings] ,axis=0 )
                attention_mask = tf.stack([encoding["""attention_mask"""] for encoding in encodings] ,axis=0 )
            else:
                raise ValueError("""Target return tensor type could not be returned""" )
            encoding = BatchEncoding()
            encoding["""input_ids"""] = input_ids
            encoding["""attention_mask"""] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images ,return_tensors=return_tensors ,**kwargs ).pixel_values
            encoding["""query_pixel_values"""] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )
    def UpperCamelCase__ ( self ,*args ,**kwargs ) -> int:
        return self.image_processor.post_process(*args ,**kwargs )
    def UpperCamelCase__ ( self ,*args ,**kwargs ) -> Optional[Any]:
        return self.image_processor.post_process_object_detection(*args ,**kwargs )
    def UpperCamelCase__ ( self ,*args ,**kwargs ) -> Optional[Any]:
        return self.image_processor.post_process_image_guided_detection(*args ,**kwargs )
    def UpperCamelCase__ ( self ,*args ,**kwargs ) -> List[str]:
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def UpperCamelCase__ ( self ,*args ,**kwargs ) -> List[str]:
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,FutureWarning ,)
        return self.image_processor_class
    @property
    def UpperCamelCase__ ( self ) -> Union[str, Any]:
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,FutureWarning ,)
        return self.image_processor
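# --- Example (added): a hypothetical usage sketch for the processor above. The checkpoint
# name and image URL are assumptions for illustration, not part of the original file.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.open(
        requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    # One inner list of text queries per image; shorter lists are padded with " " to the longest one.
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(inputs.keys())  # input_ids, attention_mask, pixel_values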
| 77 | 0 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["""data"""])
y = np.array(data["""target"""])
classes = data['''target_names''']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    # Straight-line distance between two feature vectors
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
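# --- Example (added): an equivalent vectorized neighbour lookup; a sketch, not part of
# the original script, reusing the arrays defined above.
def classifier_vectorized(train_data, train_target, classes, point, k=5):
    dists = np.linalg.norm(np.asarray(train_data) - np.asarray(point), axis=1)  # all distances at once
    nearest = np.asarray(train_target)[np.argsort(dists)[:k]]  # labels of the k closest points
    return classes[np.bincount(nearest).argmax()]  # majority vote

if __name__ == "__main__":
    print(classifier_vectorized(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))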
| 55 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
        processor = BlipProcessor(image_processor , tokenizer)
        processor.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase).tokenizer
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase).image_processor
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__A : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__A : List[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0)
__A : Tuple = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : int = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : List[str] = self.prepare_image_inputs()
__A : str = image_processor(_UpperCAmelCase , return_tensors='np')
__A : str = processor(images=_UpperCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : Union[str, Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : str = 'lower newer'
__A : Dict = processor(text=_UpperCAmelCase)
__A : Tuple = tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Optional[Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Dict = 'lower newer'
__A : int = self.prepare_image_inputs()
__A : List[Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase):
processor()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : List[str] = processor.batch_decode(_UpperCAmelCase)
__A : List[str] = tokenizer.batch_decode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : str = 'lower newer'
__A : str = self.prepare_image_inputs()
__A : Optional[int] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 190 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
snake_case_ = 'decision_transformer'
snake_case_ = ['past_key_values']
snake_case_ = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
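# --- Example (added): construction sketch for the config above; the model import is an
# assumption based on the standard transformers layout, not part of this file.
if __name__ == "__main__":
    from transformers import DecisionTransformerConfig, DecisionTransformerModel

    config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
    model = DecisionTransformerModel(config)
    print(model.config.n_layer, model.config.n_head)  # 3 1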
| 296 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama'''] = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_llama_fast'''] = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_llama'''] = [
        '''LlamaForCausalLM''',
        '''LlamaModel''',
        '''LlamaPreTrainedModel''',
        '''LlamaForSequenceClassification''',
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
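# --- Example (added): what the lazy structure buys the caller; a sketch, not part of the
# original module. Attribute access on the package triggers the real import on first use.
if __name__ == "__main__":
    from transformers import LlamaConfig  # resolved through _LazyModule on first access

    config = LlamaConfig(hidden_size=512, num_hidden_layers=2, num_attention_heads=4)
    print(config.model_type)  # "llama"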
| 296 | 1 |
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer (BaseTokenizer ):
'''simple docstring'''
    def __init__( self , replacement = "▁" , add_prefix_space = True , unk_token = "<unk>" , eos_token = "</s>" , pad_token = "<pad>" , ):
        """simple docstring"""
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens )
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram() )
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}" ) , " " ),
                normalizers.Lowercase(),
            ] )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space ),
                pre_tokenizers.Digits(individual_digits=True ),
                pre_tokenizers.Punctuation(),
            ] )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement , add_prefix_space=add_prefix_space )
        tokenizer.post_processor = TemplateProcessing(
            single=F"""$A {self.special_tokens['eos']['token']}""" , special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])] , )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer , parameters )
    def train( self , files , vocab_size = 8000 , show_progress = True , ):
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        if isinstance(files , str ):
            files = [files]
        self._tokenizer.train(files , trainer=trainer )
        self.add_unk_id()
    def train_from_iterator( self , iterator , vocab_size = 8000 , show_progress = True , ):
        """simple docstring"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size , special_tokens=self.special_tokens_list , show_progress=show_progress , )
        self._tokenizer.train_from_iterator(iterator , trainer=trainer )
        self.add_unk_id()
    def add_unk_id( self ):
        """simple docstring"""
        tokenizer_json = json.loads(self._tokenizer.to_str() )
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json ) )
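# --- Example (added): hypothetical training sketch for the tokenizer above; the corpus
# contents and vocab size are made up for illustration.
if __name__ == "__main__":
    corpus = ["the quick brown fox", "jumps over the lazy dog"] * 100
    tok = SentencePieceUnigramTokenizer()
    tok.train_from_iterator(corpus, vocab_size=100)
    print(tok.encode("the quick fox").tokens)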
| 61 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None )
def make_linear_from_emb(emb ) -> nn.Linear:
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False ) -> MBartForConditionalGeneration:
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location="cpu" )["model"]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is a mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
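# --- Example (added): direct-call sketch equivalent to the CLI above; the checkpoint
# path and output directory are placeholders, so this is left commented out.
# model = convert_fairseq_mbart_checkpoint_from_disk(
#     "checkpoints/model.pt", hf_config_path="facebook/mbart-large-cc25", finetuned=True
# )
# model.save_pretrained("converted-mbart")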
| 65 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=DummyObject ):
    UpperCamelCase__ = ['''keras_nlp''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ["""keras_nlp"""] )
| 356 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class __lowerCAmelCase ( BackboneConfigMixin , PretrainedConfig ):
UpperCamelCase__ = '''nat'''
UpperCamelCase__ = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
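# --- Example (added): construction sketch for the config class above. Upstream this class
# is named `NatConfig`; here the obfuscated local name is used so the snippet runs as-is.
if __name__ == "__main__":
    config = __lowerCAmelCase(embed_dim=64, depths=[3, 4, 6, 5])
    print(config.num_layers)   # 4
    print(config.hidden_size)  # 64 * 2 ** 3 == 512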
| 347 | 0 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path(test_file ):
    components = test_file.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            F'''{test_file} instead.''' )
    test_fn = components[-1]
    if not test_fn.endswith('py' ):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
    if not test_fn.startswith('test_modeling_' ):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
    components = components[:-1] + [test_fn.replace('.py' , '' )]
    test_module_path = '.'.join(components )
    return test_module_path
def get_test_module(test_file ):
    test_module_path = get_module_path(test_file )
    test_module = importlib.import_module(test_module_path )
    return test_module
def get_tester_classes(test_file ):
    tester_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        if attr.endswith('ModelTester' ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes(test_file ):
    test_classes = []
    test_module = get_test_module(test_file )
    for attr in dir(test_module ):
        test_class = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class , 'all_model_classes' , [] )
        if len(model_classes ) > 0:
            test_classes.append(test_class )
    # sort with class names
    return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes(test_file ):
    test_classes = get_test_classes(test_file )
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class(test_class ):
    test = test_class()
    if hasattr(test , 'setUp' ):
        test.setUp()
    model_tester = None
    if hasattr(test , 'model_tester' ):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file , model_class ):
    test_classes = get_test_classes(test_file )
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class )
    # sort with class names
    return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model(test_file , model_class ):
    test_classes = get_test_classes_for_model(test_file , model_class )
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class )
        if tester_class is not None:
            tester_classes.append(tester_class )
    # sort with class names
    return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping(test_file ):
    test_classes = get_test_classes(test_file )
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file ):
    model_classes = get_model_classes(test_file )
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file ):
    model_classes = get_model_classes(test_file )
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o ):
    if isinstance(o , str ):
        return o
    elif isinstance(o , type ):
        return o.__name__
    elif isinstance(o , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(o , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
    else:
        return o
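# --- Example (added): hypothetical invocation from a transformers checkout; the test
# file path is illustrative and must exist on disk for the imports to resolve.
if __name__ == "__main__":
    test_file = "tests/models/bert/test_modeling_bert.py"
    print(get_module_path(test_file))  # tests.models.bert.test_modeling_bert
    for test_class in get_test_classes(test_file):
        print(test_class.__name__, get_model_tester_from_test_class(test_class))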
| 213 |
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ) -> Any:
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config ,'hidden_sizes' ) )
        self.parent.assertTrue(hasattr(config ,'num_attention_heads' ) )
class LevitModelTester :
    def __init__( self ,parent ,batch_size=13 ,image_size=64 ,num_channels=3 ,kernel_size=3 ,stride=2 ,padding=1 ,patch_size=16 ,hidden_sizes=[128, 256, 384] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,mlp_ratio=[2, 2, 2] ,attention_ratio=[2, 2, 2] ,initializer_range=0.02 ,is_training=True ,use_labels=True ,num_labels=2 ,) -> str:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[str] = None
if self.use_labels:
lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
lowercase_ : List[str] = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return LevitConfig(
image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
'''simple docstring'''
lowercase_ : Union[str, Any] = LevitModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Optional[Any] = model(__UpperCamelCase )
lowercase_ : Union[str, Any] = (self.image_size, self.image_size)
lowercase_ , lowercase_ : Tuple = image_size[0], image_size[1]
for _ in range(4 ):
lowercase_ : Optional[int] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase_ : Dict = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,)
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : str = self.num_labels
lowercase_ : Optional[Any] = LevitForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
lowercase_ : Union[str, Any] = model(__UpperCamelCase ,labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Tuple = config_and_inputs
lowercase_ : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class LevitModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase = (
{
'feature-extraction': LevitModel,
'image-classification': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
lowercase = False
    def setUp( self ) -> List[str]:
        '''simple docstring'''
        self.model_tester = LevitModelTester(self )
        self.config_tester = LevitConfigTester(self ,config_class=LevitConfig ,has_text_modality=False ,hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Levit does not output attentions' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[int] = model_class(__UpperCamelCase )
lowercase_ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ):
lowercase_ : List[Any] = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
lowercase_ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[str] = len(self.model_tester.depths ) + 1
self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase )
lowercase_ : Optional[int] = (self.model_tester.image_size, self.model_tester.image_size)
lowercase_ , lowercase_ : Union[str, Any] = image_size[0], image_size[1]
for _ in range(4 ):
lowercase_ : Optional[Any] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase_ : Optional[int] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[
height * width,
self.model_tester.hidden_sizes[0],
] ,)
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Any = True
check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = super()._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ,return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Optional[Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__UpperCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase_ : Dict = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
lowercase_ : List[Any] = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ,return_labels=__UpperCamelCase )
lowercase_ : Union[str, Any] = model(**__UpperCamelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ : Union[str, Any] = False
lowercase_ : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase_ : Dict = model_class(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.to(__UpperCamelCase )
model.train()
lowercase_ : Tuple = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ,return_labels=__UpperCamelCase )
lowercase_ : Any = model(**__UpperCamelCase ).loss
loss.backward()
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : int = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__UpperCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ : Any = problem_type['title']
lowercase_ : Tuple = problem_type['num_labels']
lowercase_ : Tuple = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
lowercase_ : Optional[int] = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ,return_labels=__UpperCamelCase )
if problem_type["num_labels"] > 1:
lowercase_ : Any = inputs['labels'].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] )
lowercase_ : str = inputs['labels'].to(problem_type['dtype'] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__UpperCamelCase ) as warning_list:
lowercase_ : int = model(**__UpperCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = LevitModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def lowercase__( ):
lowercase_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class LevitModelIntegrationTest ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ : Any = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__UpperCamelCase )
lowercase_ : str = self.default_image_processor
lowercase_ : Any = prepare_img()
lowercase_ : Optional[int] = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
lowercase_ : Union[str, Any] = model(**__UpperCamelCase )
# verify the logits
lowercase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__UpperCamelCase )
lowercase_ : Any = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) )
| 213 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
def __snake_case ( self : List[str]):
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : str = self.get_config()
return config, pixel_values
def __snake_case ( self : int):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __snake_case ( self : List[str] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int]):
a : List[Any] = FlaxRegNetModel(config=__UpperCAmelCase)
a : List[str] = model(__UpperCAmelCase)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self : str , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any]):
a : Any = self.num_labels
a : List[Any] = FlaxRegNetForImageClassification(config=__UpperCAmelCase)
a : Union[str, Any] = model(__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : int):
a : str = self.prepare_config_and_inputs()
a , a : int = config_and_inputs
a : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest ( FlaxModelTesterMixin ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : int = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase : Dict = False
UpperCAmelCase : Any = False
UpperCAmelCase : Dict = False
    def setUp( self ):
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False)
def __snake_case ( self : Optional[Any]):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : Union[str, Any]):
return
def __snake_case ( self : Any):
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def __snake_case ( self : Tuple):
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def __snake_case ( self : Any):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def __snake_case ( self : Dict):
pass
def __snake_case ( self : Optional[Any]):
a , a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = model_class(__UpperCAmelCase)
a : Dict = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : str = [*signature.parameters.keys()]
a : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
def check_hidden_states_output(__UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str):
a : Optional[Any] = model_class(__UpperCAmelCase)
a : List[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a : str = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase) , expected_num_stages + 1)
a , a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : str = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Tuple = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Union[str, Any]):
a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a : int = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[int] = model_class(__UpperCAmelCase)
@jax.jit
def model_jitted(__UpperCAmelCase : Optional[Any] , **__UpperCAmelCase : List[Any]):
return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase)
with self.subTest("JIT Enabled"):
a : Dict = model_jitted(**__UpperCAmelCase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
a : Any = model_jitted(**__UpperCAmelCase).to_tuple()
self.assertEqual(len(__UpperCAmelCase) , len(__UpperCAmelCase))
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase):
self.assertEqual(jitted_output.shape , output.shape)
def lowercase ( )-> List[Any]:
'''simple docstring'''
a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class FlaxRegNetModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __snake_case ( self : str):
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
def __snake_case ( self : int):
a : Tuple = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
a : Optional[Any] = self.default_image_processor
a : List[Any] = prepare_img()
a : List[str] = image_processor(images=__UpperCAmelCase , return_tensors="np")
a : Any = model(**__UpperCAmelCase)
# verify the logits
a : Any = (1, 1000)
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a : Tuple = jnp.array([-0.4_180, -1.5_051, -3.4_836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))
| 226 |
"""simple docstring"""
import sys
import turtle
def get_mid(pa , pb )-> tuple[float, float]:
    '''simple docstring'''
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def triangle(vertexa , vertexb , vertexc , depth , )-> None:
    '''simple docstring'''
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
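# --- Example (added): a side note in code, not part of the original script. Each level d
# outlines 3**d new triangles, so the total drawn up to `depth` grows geometrically.
def triangles_drawn(depth: int) -> int:
    return sum(3**d for d in range(depth + 1))

assert triangles_drawn(0) == 1
assert triangles_drawn(3) == 1 + 3 + 9 + 27  # 40 outlines for depth 3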
| 226 | 1 |
def dodecahedron_surface_area(edge : float ):
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError('''Length must be a positive.''' )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge : float ):
    '''simple docstring'''
    if not isinstance(edge , (int, float) ) or edge <= 0:
        raise ValueError('''Length must be a positive.''' )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
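# --- Example (added): quick numeric check for edge length 1 (rounded); the exact forms
# follow directly from the formulas above.
assert round(dodecahedron_surface_area(1), 4) == 20.6457  # 3 * sqrt(25 + 10 * sqrt(5))
assert round(dodecahedron_volume(1), 4) == 7.6631         # (15 + 7 * sqrt(5)) / 4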
| 188 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = torch.device('''cpu''')
def UpperCAmelCase__ ( ):
'''simple docstring'''
a__ ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
a__ =Image.open(requests.get(_A , stream=_A ).raw )
return im
def UpperCAmelCase__ ( _A : Optional[int] ):
'''simple docstring'''
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.17_03E00, 2.11_07E00, -2.08_11E00, 8.86_85E-01, 2.43_60E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.96_36E-01, 2.34_78E-01, -1.69_63E00, -1.73_81E00, -8.63_37E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.27_68E-01, -4.74_29E-01, -1.08_97E00, -1.02_48E00, 3.55_23E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.53_30E-01, 2.42_11E-01, -6.01_85E-01, -8.27_89E-01, -6.04_46E-02] )
def UpperCAmelCase__ ( _A : List[str] , _A : Union[str, Any] , _A : str ):
'''simple docstring'''
a__ =dct.pop(_A )
a__ =val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
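# Editor's worked example of the rules above (the key is illustrative): a checkpoint entry
# "network.0.0.dwconv.weight" first becomes "network.0.0.depth_wise_conv.weight" via the
# ".dwconv" rule; then, since ls[2] ("0") is a digit, it is mapped to
# "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight".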
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1_000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1_000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
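# Editor's usage sketch (the script file name is illustrative; the flags match the parser below):
#   python convert_swiftformer_checkpoint.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth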
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
lowerCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 188 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1_008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
_lowerCAmelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"
    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
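# Editor's usage sketch (not part of the original file); attribute names follow the
# __init__ above, so a config for 384x384 fine-tuning could look like:
# config = BeitConfig(image_size=384, drop_path_rate=0.2)
# assert config.num_hidden_layers == 12  # unchanged default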
| 2 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
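# Editor's note: ImageNetPreTrainedConfig is a functools.partial over LevitConfig, so every
# entry in names_to_config shares the same num_labels/id2label/label2id while varying only
# the architecture fields (hidden_sizes, heads, depths, key_dim, drop_path_rate).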
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 130 | 0 |
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y), y(x0) = y0, with the explicit Euler method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
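# Editor's sketch (not in the original file): integrating dy/dx = y from y(0) = 1 with
# h = 0.01 approximates e at x = 1; explicit Euler is first-order accurate, so expect
# explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)[-1] to be about 2.70481, versus
# the exact value e = 2.71828...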
| 366 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    expression: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of the given expression with the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, expression)
    diff_function = lambdify(x, diff(expression, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
f"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}",
)
# Find root of cos(x)
print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 191 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCAmelCase : int = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 285 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description='foo', citation='bar', homepage='https://foo.bar', license='CC0', features=Features({'a': Value('int32')}), post_processed={}, supervised_keys=(), task_templates=[], builder_name='builder', config_name='config', version='1.0.0', splits=[{'name': 'train', 'num_examples': 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234, )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
| 285 | 1 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of layer i. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="attention" ) -> int:
UpperCamelCase : Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
UpperCamelCase : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
UpperCamelCase : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
UpperCamelCase : str = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
UpperCamelCase : str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
UpperCamelCase : Union[str, Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
UpperCamelCase : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
UpperCamelCase : Dict = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
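# Editor's note: the [:, i, :, :] slices suggest the T5X kernels are stored with a stacked
# layer axis; the reshapes above then merge the (heads, head_dim) axes so each projection
# becomes the 2-D matrix expected by the PyTorch attention weights.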
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of layer i. Does not transpose."""
    if split_mlp_wi:
        wi_a = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_a_a = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_a, wi_a_a)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm scale of layer i."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only, scalable_attention=False):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder").T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder").T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder").T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False, scalable_attention=False, ):
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
__lowerCamelCase : List[Any] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
__lowerCamelCase : Any = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 140 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
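# Editor's usage sketch (file names are illustrative; the flags match the parser above):
#   python convert_hifigan_checkpoint.py --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy --pytorch_dump_folder_path ./speecht5_hifigan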
| 140 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key

                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = 'gelu'
    config.feat_extract_norm = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = 'Wav2Vec2FeatureExtractor'
    config.tokenizer_class = 'Wav2Vec2CTCTokenizer'

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == 'layer' else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A__ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
A__ : Any = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 185 |
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 1_00, 1_20]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 2_20)
if __name__ == "__main__":
unittest.main()
| 185 | 1 |
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """Count the values of n below the limit for which n = x**2 - y**2 - z**2 has exactly ten solutions."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
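# Editor's note on the algebra above: with the arithmetic progression x = a + d, y = a,
# z = a - d, we get x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a) = n,
# so 4 * d = a + n / a. That is why the inner loop only visits multiples n of first_term
# (so n / first_term is integral) and then divides (first_term + n / first_term) by 4.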
if __name__ == "__main__":
print(F'''{solution() = }''')
| 179 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
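# Illustrative note (not part of the original file): with the `_LazyModule`
# installed in `sys.modules`, importing the config is cheap, while the first
# access to a modeling class triggers the heavy import, e.g.:
#
#   from transformers.models.xlnet import XLNetConfig  # config only
#   from transformers.models.xlnet import XLNetModel   # now loads modeling_xlnet (and torch)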
| 179 | 1 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication using the radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverse_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverse_c[0]) <= 1:
            return inverse_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverse_c[i][j]
                            + inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverse_c[i][j]
                            - inverse_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverse_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverse_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverse_c]
        # Remove trailing zero coefficients
        while inverse_c[-1] == 0:
            inverse_c.pop()
        return inverse_c
    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
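# Illustrative usage (not part of the original file): multiplying
# (1 + 2x + 3x^2) by (3 + 2x + x^2); coefficients are listed lowest degree first.
#
#   fft = FFT([1, 2, 3], [3, 2, 1])
#   print(fft.product)  # coefficients of the degree-4 product polynomial
#   print(fft)          # pretty-prints A, B and A*B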
| 25 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story without highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and\n"
            "seventy-five.\n\nSpiritual revelations were conceded to England at that\n"
            "favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 293 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with the usual linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other):
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return a vector of size `dimension` with all components set to 0."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return a unit basis vector of size `dimension` with a 1 at index `pos`."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Compute scalar * x + y."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a random vector of size n with integer components between a and b."""
    random.seed(None)  # reseed from system entropy; the original seed argument was lost in mangling
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple w x h matrix, stored as a list of rows."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other):
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return a square zero-matrix of dimension n x n."""
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a random matrix with integer components between a and b."""
    random.seed(None)  # reseed from system entropy; the original seed argument was lost in mangling
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
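# Illustrative demo (not part of the original module): exercises the basic
# Vector and Matrix operations defined above.
def _demo_linear_algebra() -> None:
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)  # (5,7,9)
    print(v * w)  # dot product: 32
    print(unit_basis_vector(3, 0))  # (1,0,0)
    m = square_zero_matrix(3)
    print(m * v)  # (0,0,0)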
| 368 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """
    Sum of r_max for 3 <= a <= n, where r_max = 2*a*floor((a-1)/2) is the maximum
    remainder of (a-1)**k + (a+1)**k divided by a**2 (Project Euler problem 120).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 101 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True  # flag reconstructed from context; the original attribute name was mangled
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no-sigma schedulers are not supported
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
assert np.abs((expected_image - image).max() ) < 5e-2 | 74 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 36 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 243 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 243 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 168 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
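# Illustrative usage sketch (not part of the module); the audio path below is
# an assumption for the demo:
#
#   tool = SpeechToTextTool()
#   transcript = tool("path/to/recording.wav")
#   print(transcript)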
| 168 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) | 365 |
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
__UpperCAmelCase = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 228 | 0 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a__ : Any = sys.version_info >= (3, 1_0)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of two `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)

    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)

    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)

    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))

    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)

    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)

    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))

    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)

            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))

    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)

    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)

    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)

    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)

    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)

    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)

    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
| 80 |
'''simple docstring'''
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of `num`."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """
    Return the digit sum of the numerator of the `max_n`-th convergent of the
    continued fraction for e (Project Euler problem 65).
    """
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(F"""{solution() = }""")
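# Sanity check (illustrative): the convergents of e start 2, 3, 8/3, 11/4, ...
# so solution(1) == 2 and solution(4) == sum_digits(11) == 2.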
| 80 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper=True):
    """Extract a model from its distributed containers (DDP, DataParallel, DeepSpeed, torch.compile)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    PartialState().wait_for_everyone()


def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys are upper-cased) and delete them on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port=None):
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 158 | 1 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """Fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler."""

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device=None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # linear multistep (Adams-Bashforth) coefficients, warm-started for the first steps
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
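# Minimal usage sketch (added): the constant "model output" below is a stand-in for
# a real diffusion model, so the loop only illustrates the calling convention, not
# a meaningful denoising run.
if __name__ == "__main__":
    demo_scheduler = IPNDMScheduler(num_train_timesteps=1000)
    demo_scheduler.set_timesteps(num_inference_steps=10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.timesteps:
        demo_output = torch.zeros_like(demo_sample)  # placeholder for model(sample, t)
        demo_sample = demo_scheduler.step(demo_output, t, demo_sample).prev_sample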
| 201 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 201 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-j-6B''': '''https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json''',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 357 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 175 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 263 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
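# Quick check (added): with the 7B-style defaults above (hidden_size=4544,
# num_attention_heads=71), FalconConfig().head_dim == 4544 // 71 == 64.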
| 263 | 1 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
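# Note (added): the backtracking above prints n! lists; e.g. for ["A", "B"] it would
# print ['A', 'B'] and then ['B', 'A'].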
| 365 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 51 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 137 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
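# Note (added): the `accelerator.accumulate(model)` context used above replaces the
# manual pattern below (a rough equivalent without Accelerate, for comparison only):
#     loss = model(**batch).loss / gradient_accumulation_steps
#     loss.backward()
#     if (step + 1) % gradient_accumulation_steps == 0:
#         optimizer.step()
#         optimizer.zero_grad()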
| 228 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCAmelCase_( lowercase_="" ) -> str:
_lowerCamelCase = tempfile.mkdtemp()
return os.path.join(lowercase_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
_lowerCamelCase = AgentAudio(lowerCamelCase__ )
_lowerCamelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
_lowerCamelCase , _lowerCamelCase = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , torch.tensor(lowerCamelCase__ ) , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
_lowerCamelCase = get_new_path(suffix='''.wav''' )
sf.write(lowerCamelCase__ , lowerCamelCase__ , 1_6_0_0_0 )
_lowerCamelCase = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , lowerCamelCase__ )
@require_vision
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
_lowerCamelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_lowerCamelCase = Image.open(lowerCamelCase__ )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_lowerCamelCase = Image.open(lowerCamelCase__ )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = '''Hey!'''
_lowerCamelCase = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , agent_type.to_string() )
self.assertEqual(lowerCamelCase__ , agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 352 |
"""simple docstring"""
def pancake_sort(arr: list) -> list:
    """simple docstring"""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
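# Trace (added): pancake_sort([3, 1, 2]) proceeds as follows:
# cur=3: max of [3, 1, 2] is at index 0; flip prefix -> [3, 1, 2]; flip first 3 -> [2, 1, 3]
# cur=2: max of [2, 1] is at index 0; flip prefix -> [2, 1, 3]; flip first 2 -> [1, 2, 3]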
| 73 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 103 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
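# Worked example (added; the classic two-state HMM from the Wikipedia article on the
# Viterbi algorithm):
#     observations = ["normal", "cold", "dizzy"]
#     states = ["Healthy", "Fever"]
#     start_p = {"Healthy": 0.6, "Fever": 0.4}
#     trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#     emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#               "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     -> ["Healthy", "Healthy", "Fever"]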
| 121 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 121 | 1 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
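# Worked example (added): for N_D = N_A = 1e17 cm^-3 and n_i = 1e10 cm^-3 at 300 K,
# V_bi = kT/q * ln(N_D * N_A / n_i**2) ≈ 0.0259 V * ln(1e14) ≈ 0.83 V, so
# builtin_voltage(1e17, 1e17, 1e10) returns roughly 0.833.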
| 58 | import math
def perfect_square(num: int) -> bool:
    '''simple docstring'''
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
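# Quick checks (added): the two implementations agree, e.g.
# perfect_square(16) -> True,  perfect_square_binary_search(16) -> True
# perfect_square(10) -> False, perfect_square_binary_search(10) -> False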
| 65 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []

    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)

    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    root = make_tree()

    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")

    print(f"Height of Tree: {height(root)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")

    print("Level-wise order Traversal: ")

    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
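# Expected outputs for the sample tree (added for reference):
#     level_order(make_tree()) == [1, 2, 3, 4, 5]
#     zigzag(make_tree()) == [[1], [3, 2], [4, 5]]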
| 353 |
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    """Return True if the given directed graph contains a cycle."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current vertex as visited and push it onto the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
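
    # A minimal usage sketch (this demo graph is an assumption, not part of the
    # original file): 0 -> 1 -> 2 -> 0 forms a cycle, so check_cycle returns True.
    demo_graph = {0: [1], 1: [2], 2: [0], 3: []}
    print(check_cycle(demo_graph))  # True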
| 76 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}
_UpperCAmelCase = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
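

if __name__ == "__main__":
    # Minimal usage sketch (an illustrative assumption, not part of the original
    # file: requires network access to the Hugging Face Hub).
    tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
    print(tokenizer("Le camembert est excellent.")["input_ids"])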
| 173 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Set the given objects to None and empty the device caches."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Check whether `exception` is one of the known out-of-memory errors."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function`, halving `batch_size` on each OOM failure."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
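

if __name__ == "__main__":
    # Hypothetical usage sketch of the decorator above (the training body is made up):
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        # The decorator injects `batch_size` as the first argument; if the body
        # raises a CUDA OOM RuntimeError, it halves batch_size and retries until
        # success or until batch_size reaches zero.
        print(f"trying batch_size={batch_size}")

    train()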
| 173 | 1 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b with the Russian-peasant (binary) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    """Compute (a * b) % c without large intermediate products."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
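

if __name__ == "__main__":
    # Quick sanity checks (a hypothetical demo, not part of the original file):
    print(binary_multiply(3, 9))  # 27
    print(binary_mod_multiply(3, 9, 5))  # 27 % 5 == 2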
| 357 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: one pass per call, recursing on the shrunken prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
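
    # Hypothetical quick demo (not part of the original file):
    print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]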
| 319 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
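

if __name__ == "__main__":
    # Minimal usage sketch (an illustrative assumption, not part of the original
    # file: requires network access to the Hugging Face Hub).
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    print(tokenizer("Hello world!")["input_ids"])
| 112 |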
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text: str, convert_value=None, default=None, error_message: str = None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text: str, options=[], convert_value=None, default_choice: int = 0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value: str):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value: str):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value: str):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value: str):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value: str):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value: str) -> bool:
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
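

if __name__ == "__main__":
    # Hypothetical interactive demo of the helpers above (the prompt text is made up):
    answer = _ask_field(
        "Use mixed precision? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    print(answer)
| 112 | 1 |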
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set `slice_size` to None to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use CLIPSeg to produce a mask for the region described by `text`
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 361 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solves the multi-process interleaved print problem by locking on this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"{gpu} is OK (global rank: {rank}/{world_size})")
dist.barrier()
if rank == 0:
printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
printflock(F"{gpu} is broken")
raise
| 321 | 0 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Unidirectional BFS over `grid` from `start` to `goal`."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of valid successor nodes around `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from parent to parent until the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 15 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Power iteration: find the largest eigenvalue (and eigenvector) of `input_matrix`
    starting from the guess `vector`."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 15 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some heads to zero) based on importance scores until the score drops below a threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked weights) and measure the time speedup."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
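    # Hypothetical invocation sketch (the flags follow the parser defined above;
    # the file paths are made up):
    #   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir tokens.txt \
    #       --output_dir ./pruned --try_masking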
    main()
| 340 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_lowerCAmelCase : List[Any] = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False
    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it is a directory
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list
    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass
    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
| 340 | 1 |
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = parse_args(sys.argv)
    UpperCamelCase = cv2.imread(filename, 0)
    cv2.imshow('''input image''', img)
UpperCamelCase = img / 255
UpperCamelCase = out.astype('''float32''')
UpperCamelCase = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
UpperCamelCase = out * 255
    UpperCamelCase = np.uint8(out)
    cv2.imshow('''output image''', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
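
# --- illustrative sketch (not part of the original file) ---------------------
# A readable version of the two kernel helpers above; the descriptive names are
# mine. The spatial kernel depends only on pixel distance, so it can be
# precomputed once. Assumes an odd kernel size and a float image in [0, 1].
def gaussian_of(values: np.ndarray, variance: float) -> np.ndarray:
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((values / sigma) ** 2) * 0.5)

def spatial_kernel(kernel_size: int, variance: float) -> np.ndarray:
    half = kernel_size // 2
    ys, xs = np.mgrid[-half : half + 1, -half : half + 1]
    return gaussian_of(np.sqrt(xs**2 + ys**2), variance)

print(spatial_kernel(5, 1.0).shape)  # (5, 5), largest weight at the centre cell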
| 306 |
import random
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = num - 1
_SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
_SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
_SCREAMING_SNAKE_CASE = random.randrange(2 ,num - 1 )
_SCREAMING_SNAKE_CASE = pow(snake_case__ ,snake_case__ ,snake_case__ )
if v != 1:
_SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_SCREAMING_SNAKE_CASE = i + 1
_SCREAMING_SNAKE_CASE = (v**2) % num
return True
def __lowerCamelCase ( snake_case__ ) -> bool:
"""simple docstring"""
if num < 2:
return False
_SCREAMING_SNAKE_CASE = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(snake_case__ )
def __lowerCamelCase ( snake_case__ = 10_24 ) -> int:
"""simple docstring"""
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) )
if is_prime_low_num(snake_case__ ):
return num
if __name__ == "__main__":
UpperCamelCase = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
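
# --- illustrative sketch (not part of the original file) ---------------------
# A compact equivalent of the Miller-Rabin routine above; the names are mine.
# Five random bases give a probabilistic test: a composite number survives a
# single random base with probability at most 1/4.
def miller_rabin(num: int, rounds: int = 5) -> bool:
    if num in (2, 3):
        return True
    if num < 2 or num % 2 == 0:
        return False
    s, t = num - 1, 0  # write num - 1 as s * 2**t with s odd
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v in (1, num - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % num
            if v == num - 1:
                break
        else:
            return False  # a witnesses that num is composite
    return True

print([n for n in range(2, 40) if miller_rabin(n)])  # the primes below 40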
| 306 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , SCREAMING_SNAKE_CASE_ , )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 354 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
def __magic_name__ ( __a : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ = R"""\w+[.]\d+"""
UpperCamelCase__ = re.findall(__a , __a )
for pat in pats:
UpperCamelCase__ = key.replace(__a , """_""".join(pat.split(""".""" ) ) )
return key
def __magic_name__ ( __a : str , __a : Dict , __a : int ):
'''simple docstring'''
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
UpperCamelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __magic_name__ ( __a : List[Any] , __a : List[Any] , __a : Optional[int]=42 ):
'''simple docstring'''
    # Step 1: Convert pytorch tensor to numpy
    UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
UpperCamelCase__ = flax_model.init_weights(PRNGKey(__a ) )
UpperCamelCase__ = flatten_dict(__a )
UpperCamelCase__ = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = rename_key(__a )
UpperCamelCase__ = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(__a , __a , __a )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(__a )
return unflatten_dict(__a )
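
# --- illustrative sketch (not part of the original file) ---------------------
# The linear-layer rule above, worked on a toy state dict: PyTorch stores
# Linear weights as (out, in) under "weight", Flax as (in, out) under "kernel".
# Plain numpy arrays stand in for tensors; the "dense" layer name is made up.
import numpy as np
from flax.traverse_util import unflatten_dict

pt_state = {"dense.weight": np.ones((4, 3)), "dense.bias": np.zeros(4)}
flax_state = {}
for key, tensor in pt_state.items():
    parts = tuple(key.split("."))
    if parts[-1] == "weight" and tensor.ndim == 2:
        parts, tensor = parts[:-1] + ("kernel",), tensor.T
    flax_state[parts] = tensor
print(unflatten_dict(flax_state)["dense"]["kernel"].shape)  # (3, 4)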
| 178 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ :str = logging.get_logger(__name__)
a_ :Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class snake_case__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = "segformer"
def __init__( self : List[Any], _snake_case : List[Any]=3, _snake_case : Dict=4, _snake_case : Union[str, Any]=[2, 2, 2, 2], _snake_case : Union[str, Any]=[8, 4, 2, 1], _snake_case : List[str]=[3_2, 6_4, 1_6_0, 2_5_6], _snake_case : Union[str, Any]=[7, 3, 3, 3], _snake_case : List[str]=[4, 2, 2, 2], _snake_case : Optional[int]=[1, 2, 5, 8], _snake_case : str=[4, 4, 4, 4], _snake_case : Union[str, Any]="gelu", _snake_case : Dict=0.0, _snake_case : Any=0.0, _snake_case : Optional[int]=0.1, _snake_case : Tuple=0.0_2, _snake_case : Dict=0.1, _snake_case : Dict=1e-6, _snake_case : List[Any]=2_5_6, _snake_case : Dict=2_5_5, **_snake_case : Union[str, Any], ) ->Any:
super().__init__(**A_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.', A_, )
snake_case__ : List[Any] = num_channels
snake_case__ : Union[str, Any] = num_encoder_blocks
snake_case__ : List[Any] = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Dict = hidden_sizes
snake_case__ : Any = patch_sizes
snake_case__ : Dict = strides
snake_case__ : Any = mlp_ratios
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Dict = classifier_dropout_prob
snake_case__ : Union[str, Any] = initializer_range
snake_case__ : Any = drop_path_rate
snake_case__ : List[str] = layer_norm_eps
snake_case__ : str = decoder_hidden_size
snake_case__ : Tuple = kwargs.get('reshape_last_stage', A_ )
snake_case__ : Union[str, Any] = semantic_loss_ignore_index
class snake_case__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def lowercase_ ( self : List[Any] ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowercase_ ( self : str ) ->float:
return 1e-4
@property
def lowercase_ ( self : Optional[Any] ) ->int:
return 1_2
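
# --- illustrative sketch (not part of the original file) ---------------------
# A short usage example, assuming the mangled class above corresponds to the
# upstream SegformerConfig that the archive map points at: the per-stage lists
# (depths, hidden_sizes, ...) must all have num_encoder_blocks entries.
from transformers import SegformerConfig

config = SegformerConfig(depths=[2, 2, 2, 2], hidden_sizes=[32, 64, 160, 256])
print(config.num_encoder_blocks, config.depths)  # 4 [2, 2, 2, 2]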
| 277 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : int = IFInpaintingPipeline
__lowercase : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
__lowercase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowercase : Optional[int] = PipelineTesterMixin.required_optional_params - {"latents"}
def __UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return self._get_dummy_components()
def __UpperCamelCase ( self , A_ , A_=0 ) -> List[Any]:
"""simple docstring"""
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self._test_save_load_local()
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
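
# --- illustrative sketch (not part of the original file) ---------------------
# The seeded-generator idiom used by the dummy-input helper above, in
# isolation: a torch.Generator bound to the target device makes pipeline
# outputs reproducible; MPS rejects device-bound generators, hence the fallback
# to seeding the default CPU generator.
def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

print(torch.rand(2, generator=make_generator("cpu")))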
| 222 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> None:
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , A , )
super().__init__(*A , **A )
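
# --- illustrative sketch (not part of the original file) ---------------------
# The deprecation-shim pattern the class above follows, reduced to its core:
# subclass the replacement, warn on construction, and change nothing else.
# Both class names here are illustrative.
class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor(size=256)  # warns, then behaves exactly like NewProcessor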
| 176 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
return ConvertCommand(
args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name )
lowerCamelCase : Tuple = '\ntransformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
@staticmethod
def UpperCAmelCase ( A ) -> List[str]:
snake_case : Union[str, Any] = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=A , required=A , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=A , required=A , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=A , required=A , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=A , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=A , default=A , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=A )
def __init__( self , A , A , A , A , A , *A , ) -> List[Any]:
snake_case : Any = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(f"""Loading model {model_type}""" )
snake_case : int = model_type
snake_case : Any = tf_checkpoint
snake_case : int = pytorch_dump_output
snake_case : List[str] = config
snake_case : Tuple = finetuning_task_name
def UpperCAmelCase ( self ) -> int:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
if "ckpt" in self._tf_checkpoint.lower():
snake_case : List[Any] = self._tf_checkpoint
snake_case : Tuple = """"""
else:
snake_case : Tuple = self._tf_checkpoint
snake_case : Tuple = """"""
convert_transfo_xl_checkpoint_to_pytorch(
A , self._config , self._pytorch_dump_output , A )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(A )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 176 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
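
# --- illustrative sketch (not part of the original file) ---------------------
# The lazy-import machinery above in miniature: in a package __init__.py, a
# module-level __getattr__ (PEP 562) defers the real import until an exported
# name is first touched; _LazyModule generalises this idea.
import importlib

_imports = {"json": ["dumps", "loads"]}  # stand-in for the real import map

def __getattr__(name):
    for module_name, exported in _imports.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")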
| 205 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase_ = logging.getLogger(__name__)
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
'''simple docstring'''
_lowercase =self.layer[current_layer](lowerCAmelCase , lowerCAmelCase , head_mask[current_layer] )
_lowercase =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =BertEncoderWithPabee(lowerCAmelCase )
self.init_weights()
_lowercase =0
_lowercase =0
_lowercase =0
_lowercase =0
def A__ ( self , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
_lowercase =threshold
def A__ ( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowercase =patience
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =0
_lowercase =0
def A__ ( self ) -> int:
'''simple docstring'''
_lowercase =self.inference_layers_num / self.inference_instances_num
_lowercase =(
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(lowerCAmelCase )
@add_start_docstrings_to_model_forward(lowerCAmelCase )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=False , ) -> str:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowercase =input_ids.size()
elif inputs_embeds is not None:
_lowercase =inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowercase =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowercase =torch.ones(lowerCAmelCase , device=lowerCAmelCase )
if token_type_ids is None:
_lowercase =torch.zeros(lowerCAmelCase , dtype=torch.long , device=lowerCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowercase =self.get_extended_attention_mask(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_lowercase , _lowercase , _lowercase =encoder_hidden_states.size()
_lowercase =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_lowercase =torch.ones(lowerCAmelCase , device=lowerCAmelCase )
_lowercase =self.invert_attention_mask(lowerCAmelCase )
else:
_lowercase =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowercase =self.get_head_mask(lowerCAmelCase , self.config.num_hidden_layers )
_lowercase =self.embeddings(
input_ids=lowerCAmelCase , position_ids=lowerCAmelCase , token_type_ids=lowerCAmelCase , inputs_embeds=lowerCAmelCase )
_lowercase =embedding_output
if self.training:
_lowercase =[]
for i in range(self.config.num_hidden_layers ):
_lowercase =self.encoder.adaptive_forward(
lowerCAmelCase , current_layer=lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase )
_lowercase =self.pooler(lowerCAmelCase )
_lowercase =output_layers[i](output_dropout(lowerCAmelCase ) )
res.append(lowerCAmelCase )
elif self.patience == 0: # Use all layers for inference
_lowercase =self.encoder(
lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , encoder_hidden_states=lowerCAmelCase , encoder_attention_mask=lowerCAmelCase , )
_lowercase =self.pooler(encoder_outputs[0] )
_lowercase =[output_layers[self.config.num_hidden_layers - 1](lowerCAmelCase )]
else:
_lowercase =0
_lowercase =None
_lowercase =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_lowercase =self.encoder.adaptive_forward(
lowerCAmelCase , current_layer=lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase )
_lowercase =self.pooler(lowerCAmelCase )
_lowercase =output_layers[i](lowerCAmelCase )
if regression:
_lowercase =logits.detach()
if patient_result is not None:
_lowercase =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_lowercase =0
else:
_lowercase =logits.detach().argmax(dim=1 )
if patient_result is not None:
_lowercase =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(lowerCAmelCase ) ):
patient_counter += 1
else:
_lowercase =0
_lowercase =logits
if patient_counter == self.patience:
break
_lowercase =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =config.num_labels
_lowercase =BertModelWithPabee(lowerCAmelCase )
_lowercase =nn.Dropout(config.hidden_dropout_prob )
_lowercase =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(lowerCAmelCase )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =self.bert(
input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , position_ids=lowerCAmelCase , head_mask=lowerCAmelCase , inputs_embeds=lowerCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_lowercase =(logits[-1],)
if labels is not None:
_lowercase =None
_lowercase =0
for ix, logits_item in enumerate(lowerCAmelCase ):
if self.num_labels == 1:
# We are doing regression
_lowercase =MSELoss()
_lowercase =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_lowercase =CrossEntropyLoss()
_lowercase =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_lowercase =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_lowercase =(total_loss / total_weights,) + outputs
return outputs
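
# --- illustrative sketch (not part of the original file) ---------------------
# The patience rule above, isolated: stop running layers once the predicted
# class has been stable for `patience` consecutive layers. Toy logits stand in
# for the per-layer classifier outputs.
def layers_used(per_layer_logits, patience: int) -> int:
    stable, previous, used = 0, None, 0
    for used, logits in enumerate(per_layer_logits, start=1):
        label = logits.argmax(dim=-1)
        if previous is not None and torch.equal(label, previous):
            stable += 1
        else:
            stable = 0
        previous = label
        if stable == patience:
            break
    return used

demo = [torch.tensor([[0.9, 0.1]]) for _ in range(12)]
print(layers_used(demo, patience=3))  # 4: the first layer plus 3 stable repeats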
| 205 | 1 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Dict = TransfoXLTokenizer
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : List[str] = False
def __lowerCamelCase ( self :Union[str, Any] ):
super().setUp()
snake_case__ : Optional[int] = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
snake_case__ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCamelCase ( self :int ,**__lowercase :Any ):
snake_case__ : str = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname ,**__lowercase )
def __lowerCamelCase ( self :int ,__lowercase :Optional[int] ):
snake_case__ : int = '''<unk> UNwanted , running'''
snake_case__ : List[Any] = '''<unk> unwanted, running'''
return input_text, output_text
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(vocab_file=self.vocab_file ,lower_case=__lowercase )
snake_case__ : Tuple = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(__lowercase ,['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[0, 4, 8, 7] )
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[Any] = TransfoXLTokenizer(lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Any = TransfoXLTokenizer(lower_case=__lowercase )
snake_case__ : List[str] = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
snake_case__ : Union[str, Any] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(__lowercase ) ,__lowercase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Optional[Any] = len(__lowercase )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' ,1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowercase ) ,original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) ,[1] )
self.assertEqual(tokenizer.decode([1] ) ,'''new1''' )
| 361 |
def _lowerCAmelCase ( __lowerCAmelCase ) -> list:
"""simple docstring"""
for i in range(len(__lowerCAmelCase ) - 1 , 0 , -1 ):
snake_case__ : List[Any] = False
for j in range(__lowerCAmelCase , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
snake_case__ , snake_case__ : Optional[int] = unsorted[j - 1], unsorted[j]
snake_case__ : Any = True
for j in range(__lowerCAmelCase ):
if unsorted[j] > unsorted[j + 1]:
snake_case__ , snake_case__ : Tuple = unsorted[j + 1], unsorted[j]
snake_case__ : int = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
A__ = input('''Enter numbers separated by a comma:\n''').strip()
A__ = [int(item) for item in user_input.split(''',''')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 44 | 0 |
def _a ( SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        raise ValueError('''Length must be positive.''' )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def _a ( SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
        raise ValueError('''Length must be positive.''' )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
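
# --- illustrative check (not part of the original file) ----------------------
# A quick numeric check of the two closed forms above for edge length 5.
edge = 5
area = 3 * ((25 + 10 * (5 ** 0.5)) ** 0.5) * edge**2
volume = ((15 + 7 * (5 ** 0.5)) / 4) * edge**3
print(round(area, 2), round(volume, 2))  # 516.14 957.89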
| 146 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class __magic_name__ ( __lowerCAmelCase):
A: torch.FloatTensor
class __magic_name__ ( __lowerCAmelCase , __lowerCAmelCase):
@register_to_config
def __init__( self : Union[str, Any] , lowerCamelCase__ : int = 32 , lowerCamelCase__ : int = 64 , lowerCamelCase__ : int = 20 , lowerCamelCase__ : int = 768 , lowerCamelCase__ : Optional[Any]=77 , lowerCamelCase__ : Optional[int]=4 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : str = "silu" , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[str] = "linear" , lowerCamelCase__ : Optional[str] = "prd" , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[int] = None , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
UpperCamelCase__ : List[Any] = num_attention_heads
UpperCamelCase__ : Optional[Any] = attention_head_dim
UpperCamelCase__ : List[str] = num_attention_heads * attention_head_dim
UpperCamelCase__ : Union[str, Any] = additional_embeddings
UpperCamelCase__ : Union[str, Any] = time_embed_dim or inner_dim
UpperCamelCase__ : int = embedding_proj_dim or embedding_dim
UpperCamelCase__ : Optional[Any] = clip_embed_dim or embedding_dim
UpperCamelCase__ : Dict = Timesteps(lowerCamelCase__ , lowerCamelCase__ , 0 )
UpperCamelCase__ : List[Any] = TimestepEmbedding(lowerCamelCase__ , lowerCamelCase__ , out_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if embedding_proj_norm_type is None:
UpperCamelCase__ : int = None
elif embedding_proj_norm_type == "layer":
UpperCamelCase__ : Optional[int] = nn.LayerNorm(lowerCamelCase__ )
else:
raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" )
UpperCamelCase__ : Dict = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
if encoder_hid_proj_type is None:
UpperCamelCase__ : List[Any] = None
elif encoder_hid_proj_type == "linear":
UpperCamelCase__ : List[str] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
else:
raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" )
UpperCamelCase__ : List[str] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCamelCase__ ) )
if added_emb_type == "prd":
UpperCamelCase__ : Any = nn.Parameter(torch.zeros(1 , 1 , lowerCamelCase__ ) )
elif added_emb_type is None:
UpperCamelCase__ : Union[str, Any] = None
else:
raise ValueError(
F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." )
UpperCamelCase__ : str = nn.ModuleList(
[
BasicTransformerBlock(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , dropout=lowerCamelCase__ , activation_fn='''gelu''' , attention_bias=lowerCamelCase__ , )
for d in range(lowerCamelCase__ )
] )
if norm_in_type == "layer":
UpperCamelCase__ : int = nn.LayerNorm(lowerCamelCase__ )
elif norm_in_type is None:
UpperCamelCase__ : int = None
else:
raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." )
UpperCamelCase__ : Optional[Any] = nn.LayerNorm(lowerCamelCase__ )
UpperCamelCase__ : List[str] = nn.Linear(lowerCamelCase__ , lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 )
causal_attention_mask.triu_(1 )
UpperCamelCase__ : Union[str, Any] = causal_attention_mask[None, ...]
self.register_buffer('''causal_attention_mask''' , lowerCamelCase__ , persistent=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
UpperCamelCase__ : Optional[int] = nn.Parameter(torch.zeros(1 , lowerCamelCase__ ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, AttentionProcessor]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = {}
def fn_recursive_add_processors(lowerCamelCase__ : str , lowerCamelCase__ : torch.nn.Module , lowerCamelCase__ : Dict[str, AttentionProcessor] ):
if hasattr(lowerCamelCase__ , '''set_processor''' ):
UpperCamelCase__ : Optional[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowerCamelCase__ , lowerCamelCase__ )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return processors
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Tuple = len(self.attn_processors.keys() )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowerCamelCase__ )} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(lowerCamelCase__ : str , lowerCamelCase__ : torch.nn.Module , lowerCamelCase__ : Dict ):
if hasattr(lowerCamelCase__ , '''set_processor''' ):
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
module.set_processor(lowerCamelCase__ )
else:
module.set_processor(processor.pop(F"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowerCamelCase__ , lowerCamelCase__ )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
def UpperCAmelCase__ ( self : str , lowerCamelCase__ : str , lowerCamelCase__ : Union[torch.Tensor, float, int] , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[torch.BoolTensor] = None , lowerCamelCase__ : bool = True , ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = hidden_states.shape[0]
UpperCamelCase__ : List[Any] = timestep
if not torch.is_tensor(lowerCamelCase__ ):
UpperCamelCase__ : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
UpperCamelCase__ : Optional[int] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase__ : List[str] = timesteps * torch.ones(lowerCamelCase__ , dtype=timesteps.dtype , device=timesteps.device )
UpperCamelCase__ : int = self.time_proj(lowerCamelCase__ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCamelCase__ : List[str] = timesteps_projected.to(dtype=self.dtype )
UpperCamelCase__ : Optional[Any] = self.time_embedding(lowerCamelCase__ )
if self.embedding_proj_norm is not None:
UpperCamelCase__ : Dict = self.embedding_proj_norm(lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = self.embedding_proj(lowerCamelCase__ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCamelCase__ : int = self.encoder_hidden_states_proj(lowerCamelCase__ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
UpperCamelCase__ : Optional[int] = self.proj_in(lowerCamelCase__ )
UpperCamelCase__ : int = self.positional_embedding.to(hidden_states.dtype )
UpperCamelCase__ : Tuple = []
UpperCamelCase__ : Optional[Any] = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCamelCase__ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCamelCase__ : Tuple = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCamelCase__ : Dict = hidden_states[:, None, :]
UpperCamelCase__ : Optional[int] = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCamelCase__ : int = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCamelCase__ , -1 , -1 )
additional_embeds.append(lowerCamelCase__ )
UpperCamelCase__ : Dict = torch.cat(
lowerCamelCase__ , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
UpperCamelCase__ : Dict = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCamelCase__ : List[str] = F.pad(
lowerCamelCase__ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCamelCase__ : int = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCamelCase__ : Union[str, Any] = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0
UpperCamelCase__ : Any = F.pad(lowerCamelCase__ , (0, self.additional_embeddings) , value=0.0 )
UpperCamelCase__ : Optional[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCamelCase__ : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCamelCase__ : List[str] = self.norm_in(lowerCamelCase__ )
for block in self.transformer_blocks:
UpperCamelCase__ : Any = block(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
UpperCamelCase__ : Optional[Any] = self.norm_out(lowerCamelCase__ )
if self.prd_embedding is not None:
UpperCamelCase__ : Optional[int] = hidden_states[:, -1]
else:
UpperCamelCase__ : int = hidden_states[:, additional_embeddings_len:]
UpperCamelCase__ : List[str] = self.proj_to_clip_embeddings(lowerCamelCase__ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ : Tuple = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
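
# --- illustrative sketch (not part of the original file) ---------------------
# The causal-mask construction used in __init__ above, in isolation: fill a
# square matrix with a large negative value, then triu_(1) zeroes the diagonal
# and everything below it, leaving -10000.0 only at future positions. Added to
# attention scores, this blocks attending to later tokens.
seq_len = 4
mask = torch.full((seq_len, seq_len), -10000.0)
mask.triu_(1)
print(mask)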
| 146 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 360 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
def __init__(self : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any]=1_3 , snake_case_ : str=3_2 , snake_case_ : Any=2 , snake_case_ : Union[str, Any]=3 , snake_case_ : int=1_6 , snake_case_ : Optional[int]=[3_2, 6_4, 1_2_8] , snake_case_ : str=[1, 2, 1] , snake_case_ : str=[2, 2, 4] , snake_case_ : List[str]=2 , snake_case_ : List[str]=2.0 , snake_case_ : List[Any]=True , snake_case_ : Tuple=0.0 , snake_case_ : Optional[Any]=0.0 , snake_case_ : int=0.1 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[str]=False , snake_case_ : Optional[int]=True , snake_case_ : Optional[int]=0.02 , snake_case_ : List[str]=1E-5 , snake_case_ : List[Any]=True , snake_case_ : int=None , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=1_0 , snake_case_ : Union[str, Any]=8 , snake_case_ : Optional[Any]=["stage1", "stage2"] , snake_case_ : List[Any]=[1, 2] , ):
__a : Tuple = parent
__a : str = batch_size
__a : Any = image_size
__a : List[Any] = patch_size
__a : List[Any] = num_channels
__a : List[str] = embed_dim
__a : str = hidden_sizes
__a : Any = depths
__a : List[str] = num_heads
__a : Any = window_size
__a : List[str] = mlp_ratio
__a : Optional[int] = qkv_bias
__a : Any = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : str = drop_path_rate
__a : Optional[Any] = hidden_act
__a : Optional[int] = use_absolute_embeddings
__a : List[str] = patch_norm
__a : int = layer_norm_eps
__a : Optional[Any] = initializer_range
__a : List[str] = is_training
__a : Dict = scope
__a : Optional[Any] = use_labels
__a : Union[str, Any] = type_sequence_label_size
__a : Optional[int] = encoder_stride
__a : str = out_features
__a : Optional[int] = out_indices
def lowerCAmelCase (self : Dict ):
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase (self : Optional[Any] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase (self : Dict , snake_case_ : int , snake_case_ : Tuple , snake_case_ : str ):
__a : int = FocalNetModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Dict = model(snake_case_ )
__a : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__a : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase (self : Tuple , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__a : List[str] = FocalNetBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : List[str] = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__a : Union[str, Any] = None
__a : Tuple = FocalNetBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : int = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Optional[Any] ):
__a : List[str] = FocalNetForMaskedImageModeling(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : int = model(snake_case_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a : str = 1
__a : Optional[Any] = FocalNetForMaskedImageModeling(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int ):
__a : Dict = self.type_sequence_label_size
__a : Optional[Any] = FocalNetForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Union[str, Any] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a : Optional[int] = 1
__a : str = FocalNetForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase (self : List[Any] ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a : Any = config_and_inputs
__a : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __lowercase ,__lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Optional[Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Dict = False
def lowerCAmelCase (self : List[Any] ):
__a : Union[str, Any] = FocalNetModelTester(self )
__a : Dict = ConfigTester(self , config_class=snake_case_ , embed_dim=3_7 , has_text_modality=snake_case_ )
def lowerCAmelCase (self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase (self : Dict ):
return
def lowerCAmelCase (self : Dict ):
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case_ )
def lowerCAmelCase (self : Any ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case_ )
def lowerCAmelCase (self : Optional[int] ):
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def lowerCAmelCase (self : Optional[Any] ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def lowerCAmelCase (self : str ):
pass
def lowerCAmelCase (self : Tuple ):
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a : int = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def lowerCAmelCase (self : Optional[int] ):
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a : str = model_class(snake_case_ )
__a : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Dict = [*signature.parameters.keys()]
__a : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCAmelCase (self : Tuple , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[Any] ):
__a : Any = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__a : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__a : Union[str, Any] = outputs.hidden_states
__a : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
# FocalNet has a different seq_length
__a : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__a : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case_ ) , snake_case_ )
__a , __a , __a , __a : List[Any] = reshaped_hidden_states[0].shape
__a : List[str] = (
reshaped_hidden_states[0].view(snake_case_ , snake_case_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase (self : Optional[int] ):
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__a : Any = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[int] = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase (self : List[Any] ):
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[str] = 3
__a : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) )
@slow
def lowerCAmelCase (self : str ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = FocalNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase (self : List[Any] ):
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = _config_zero_init(snake_case_ )
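        # with a zero-initializer config, every trainable non-embedding parameter should
        # come out exactly 0.0 or 1.0, which the loop below asserts parameter by parameter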
for model_class in self.all_model_classes:
__a : str = model_class(config=snake_case_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
    def default_image_processor(self ):
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def lowerCAmelCase (self : str ):
__a : int = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(snake_case_ )
__a : Optional[Any] = self.default_image_processor
__a : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__a : Optional[Any] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
__a : Any = model(**snake_case_ )
# verify the logits
__a : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__a : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_8_1 )
@require_torch
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp(self ):
        self.model_tester = FocalNetModelTester(self )
| 90 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
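# `_import_structure` below maps submodule names to their public symbols; the
# `_LazyModule` at the bottom defers the real imports until first attribute access.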
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowerCamelCase__ ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = """Speech2TextFeatureExtractor"""
    tokenizer_class = """Speech2TextTokenizer"""
    def __init__(self ,feature_extractor ,tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor ,tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__(self ,*args ,**kwargs ):
        """simple docstring"""
        # inside the `as_target_processor` context manager everything is forwarded to the
        # processor the context manager selected (the tokenizer, for label processing)
        if self._in_target_context_manager:
            return self.current_processor(*args ,**kwargs )
        if "raw_speech" in kwargs:
            warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
            audio = kwargs.pop('''raw_speech''' )
        else:
            audio = kwargs.pop('''audio''' ,None )
        sampling_rate = kwargs.pop('''sampling_rate''' ,None )
        text = kwargs.pop('''text''' ,None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio ,*args ,sampling_rate=sampling_rate ,**kwargs )
        if text is not None:
            encodings = self.tokenizer(text ,**kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs
    def batch_decode(self ,*args ,**kwargs ):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode(self ,*args ,**kwargs ):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args ,**kwargs )
    @contextmanager
    def as_target_processor(self ):
        """Temporarily switches the processor used by `__call__` to the tokenizer, for label processing."""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call.''' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
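# Minimal usage sketch (hypothetical objects; upstream exposes this class as
# Speech2TextProcessor):
#   processor = lowerCamelCase__(feature_extractor ,tokenizer )
#   model_inputs = processor(audio=waveform ,sampling_rate=16_000 ,text=transcript )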
| 129 | 1 |
'''simple docstring'''
def UpperCamelCase__ ( input_str , use_pascal = False ):
    """simple docstring"""
    if not isinstance(input_str , str ):
        raise ValueError(f"Expected string as input, found {type(input_str )}" )
    if not isinstance(use_pascal , bool ):
        raise ValueError(f"Expected boolean as use_pascal parameter, found {type(use_pascal )}" )
    words = input_str.split("""_""" )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 350 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # after each of the len(depths)-1 patch-merging stages the number of patches shrinks
        # by a factor of 4 while the embedding dim doubles, hence the two expressions below
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ["""stem"""]
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip("""Swin does not use inputs_embeds""" )
def lowercase__ ( self : str ) -> List[str]:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def lowercase__ ( self : Any ) -> Union[str, Any]:
pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def lowercase__ ( self : Union[str, Any] ) -> Any:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
    def lowercase__ ( self ):
        pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def lowercase__ ( self : List[Any] ) -> Any:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : Any ) -> str:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def lowercase__ ( self : Dict ) -> Optional[int]:
pass
    def test_model_outputs_equivalence( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                            """Tuple and dict output are not equal. Difference:"""
                            f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object ).any()}. Dict has"
                            f" `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object ).any()}."
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {"""output_hidden_states""": True} )
@require_torch
class MaskFormerSwinBackboneTest ( BackboneTesterMixin , unittest.TestCase ):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ):
        self.model_tester = MaskFormerSwinModelTester(self )
    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["""pixel_values"""].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 220 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowercase_ = logging.get_logger(__name__)
class snake_case ( FlavaImageProcessor ):
    '''simple docstring'''
    def __init__( self, *args, **kwargs ):
        '''simple docstring'''
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''', FutureWarning, )
        super().__init__(*args, **kwargs )
| 266 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
    'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
    'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
    'tokenizer_config_file': {
        'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot-3B': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    """simple docstring"""
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
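# The resulting table covers all 256 byte values: printable latin-1 bytes map to
# themselves, while the remaining bytes are shifted to unused code points from 256 up.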
def get_pairs( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
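# e.g. get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}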
class snake_case ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False ) if isinstance(bos_token, str ) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False ) if isinstance(eos_token, str ) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False ) if isinstance(sep_token, str ) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False ) if isinstance(cls_token, str ) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False ) if isinstance(unk_token, str ) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False ) if isinstance(pad_token, str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder )
    def bpe( self, token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        # iteratively merge the lowest-ranked known pair until no mergeable pair remains
        while True:
            bigram = min(pairs, key=lambda pair : self.bpe_ranks.get(pair, float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self, text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat, text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens
    def _convert_token_to_id( self, token ):
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self, index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self, tokens ):
        '''simple docstring'''
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''', errors=self.errors )
        return text
    def save_vocabulary( self, save_directory, filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self, text, is_split_into_words=False, **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ):
        '''simple docstring'''
        return token_ids_0 + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : List[Any], _lowerCamelCase : "Conversation" ):
'''simple docstring'''
__A = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_lowerCamelCase )
__A = ''' '''.join(_lowerCamelCase )
__A = self.encode(_lowerCamelCase )
if len(_lowerCamelCase ) > self.model_max_length:
__A = input_ids[-self.model_max_length :]
logger.warning(f'Trimmed input from conversation as it was longer than {self.model_max_length} tokens.' )
return input_ids
| 266 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer'''] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_reformer_fast'''] = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_reformer'''] = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 244 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _lowerCamelCase :
    def __init__(self ) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]
    def snake_case_ (self ) -> list[float]:
        length_first_signal = len(self.first_signal )
        length_second_signal = len(self.second_signal )
        max_length = max(length_first_signal , length_second_signal )
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length )]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length ):
            rotated_signal = deque(self.second_signal )
            rotated_signal.rotate(i )
            for j, item in enumerate(rotated_signal ):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix ) , np.transpose(self.first_signal ) )
        # rounding-off to two decimal places
        return [round(i , 2 ) for i in final_signal]
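# Illustrative check, hand-computed from the implementation above: with the signals
# [2, 1, 2, -1] and [1, 2, 3, 4], the circular convolution is
#   _lowerCamelCase().snake_case_() -> [10.0, 10.0, 6.0, 14.0]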
if __name__ == "__main__":
doctest.testmod()
| 244 | 1 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    auxiliary_in_channels = 3_84
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 1_28
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 5_12
    elif "large" in model_name:
        embed_dim = 1_92
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 7_68
    # set label information
    num_labels = 1_50
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''') , '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
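# e.g. (behavior of the helper above) get_upernet_config("upernet-swin-tiny") yields an
# UperNetConfig whose Swin backbone has embed_dim=96, depths=(2, 2, 6, 2),
# num_heads=(3, 6, 12, 24) and 150 ADE20K labels.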
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight'''))
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias'''))
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight'''))
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias'''))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"""))
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"""))
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight"""))
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias"""))
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight"""))
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias"""))
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
])
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""")
            in_proj_bias = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[:dim]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[dim : dim * 2]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[-dim:, :]
            state_dict[F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order( x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2).reshape(out_channel , in_channel)
    return x
def reverse_correct_unfold_reduction_order( x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2).reshape(out_channel , in_channel)
    return x
def correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0 , 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4)
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1).reshape(in_channel)
    return x
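# The four helpers above undo/redo the channel interleaving that Swin's patch-merging
# ("unfold") applies: reshaping to (..., 4, C/4) or (..., C/4, 4) and permuting with
# [0, 2, 1, 3] swaps between mmseg's and transformers' ordering of the four merged patches.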
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='''cpu''' , file_name=model_name)[
        """state_dict"""
    ]
    for name, param in state_dict.items():
        print(name , param.shape)
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace('''bn''' , '''batch_norm''')
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest)
    read_in_q_k_v(state_dict , config.backbone_config)
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)
    model.load_state_dict(state_dict)
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url , stream=True).raw).convert('''RGB''')
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors='''pt''').pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
print(logits.shape)
print('''First values of logits:''' , logits[0, 0, :3, :3])
# assert values
if model_name == "upernet-swin-tiny":
__a : Tuple = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]])
elif model_name == "upernet-swin-small":
__a : Optional[Any] = torch.tensor(
[[-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.1_9_2_1, -7.1_9_2_1, -6.9_5_3_2], [-7.0_9_0_8, -7.0_9_0_8, -6.8_5_3_4]])
elif model_name == "upernet-swin-base":
__a : Union[str, Any] = torch.tensor(
[[-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.5_8_5_1, -6.5_8_5_1, -6.4_3_3_0], [-6.4_7_6_3, -6.4_7_6_3, -6.3_2_5_4]])
elif model_name == "upernet-swin-large":
__a : Dict = torch.tensor(
[[-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.5_2_9_7, -7.5_2_9_7, -7.3_8_0_2], [-7.4_0_4_4, -7.4_0_4_4, -7.2_5_8_6]])
print('''Logits:''' , outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4)
print('''Looks ok!''')
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        print(F"""Saving processor to {pytorch_dump_folder_path}""")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""")
model.push_to_hub(F"""openmmlab/{model_name}""")
processor.push_to_hub(F"""openmmlab/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
 | 160 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (PretrainedConfig ):
    """simple docstring"""
    model_type = '''audio-spectrogram-transformer'''
    def __init__(self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=10_24 , num_mel_bins=1_28 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
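# Minimal usage sketch (assumed; upstream exposes this class as ASTConfig):
#   config = lowerCAmelCase_(max_length=10_24 , num_mel_bins=1_28 )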
| 25 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n"
def downscale_height_and_width( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
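# e.g. downscale_height_and_width(7_68 , 7_68 , scale_factor=8 ) -> (96, 96): dimensions are
# divided by scale_factor**2 (rounded up) and multiplied back by scale_factor.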
class __snake_case ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet , scheduler , movq ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )
        # scaling by `init_noise_sigma` puts the latents on the noise scale the scheduler expects
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(F'cuda:{gpu_id}' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(F'cuda:{gpu_id}' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase__ )
def __call__( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple = 5_12 , lowerCamelCase : List[str] = 5_12 , lowerCamelCase : int = 1_00 , lowerCamelCase : List[Any] = 4.0 , lowerCamelCase : List[str] = 1 , lowerCamelCase : List[Any] = None , lowerCamelCase : str = None , lowerCamelCase : Dict = "pil" , lowerCamelCase : Any = True , ) -> Any:
lowerCAmelCase_ : Dict = self._execution_device
lowerCAmelCase_ : Optional[Any] = guidance_scale > 1.0
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCAmelCase_ : Optional[Any] = torch.cat(UpperCamelCase__ , dim=0 )
lowerCAmelCase_ : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCAmelCase_ : Any = torch.cat(UpperCamelCase__ , dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase_ : List[Any] = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCAmelCase_ : str = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCAmelCase_ : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ )
self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
lowerCAmelCase_ : Optional[int] = self.scheduler.timesteps
lowerCAmelCase_ : Tuple = self.unet.config.in_channels
lowerCAmelCase_, lowerCAmelCase_ : Dict = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )
# create initial latent
lowerCAmelCase_ : Optional[int] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase_ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase_ : List[str] = {"""image_embeds""": image_embeds}
lowerCAmelCase_ : int = self.unet(
sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
if do_classifier_free_guidance:
lowerCAmelCase_, lowerCAmelCase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
lowerCAmelCase_, lowerCAmelCase_ : Tuple = noise_pred.chunk(2 )
lowerCAmelCase_, lowerCAmelCase_ : List[Any] = variance_pred.chunk(2 )
lowerCAmelCase_ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase_ : Optional[int] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase_, lowerCAmelCase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ : Union[str, Any] = self.scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0]
# post-processing
lowerCAmelCase_ : Union[str, Any] = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}' )
if output_type in ["np", "pil"]:
lowerCAmelCase_ : List[str] = image * 0.5 + 0.5
lowerCAmelCase_ : Dict = image.clamp(0 , 1 )
lowerCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase_ : Any = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
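# --- Editor-added usage sketch -------------------------------------------
# A minimal example of driving a Kandinsky-style decoder pipeline like the
# one above. The concrete class names and checkpoint ids below are
# assumptions (this snippet does not name its own pipeline class).
#
#   import torch
#   from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline
#
#   prior = KandinskyV22PriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   decoder = KandinskyV22Pipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#   ).to("cuda")
#
#   image_embeds, negative_image_embeds = prior("a photo of a red cat").to_tuple()
#   image = decoder(
#       image_embeds=image_embeds,
#       negative_image_embeds=negative_image_embeds,
#       height=512,
#       width=512,
#       num_inference_steps=100,
#   ).images[0]
#   image.save("red_cat.png")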
| 362 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[str] = logging.get_logger(__name__)
__A : str = "https://openaipublic.azureedge.net/jukebox/models/"
__A : Any = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def UpperCamelCase_ ( A__ : int ):
'''simple docstring'''
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
lowerCAmelCase_ : Union[str, Any] = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
lowerCAmelCase_ : Union[str, Any] = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
lowerCAmelCase_ : Union[str, Any] = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
lowerCAmelCase_ : int = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
lowerCAmelCase_ : Any = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
lowerCAmelCase_ : str = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
lowerCAmelCase_ : int = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
lowerCAmelCase_ : List[Any] = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
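# Editor-added illustration of the renaming rules above (the call site further
# down refers to this function as `replace_key`; the outputs assume the logic
# exactly as written):
#
#   replace_key("vqvae.encoders.0.level_blocks.0.k")
#       -> "vqvae.encoders.0.level_blocks.0.codebook"
#   replace_key("prior.x_out.weight")
#       -> "prior.fc_proj_out.weight"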
def UpperCamelCase_ ( A__ : Dict , A__ : Optional[Any] , A__ : Tuple , A__ : Any ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = {}
import re
lowerCAmelCase_ : Union[str, Any] = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
lowerCAmelCase_ : str = re.compile(
R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
lowerCAmelCase_ : Dict = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
lowerCAmelCase_ : int = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
lowerCAmelCase_ : Optional[int] = re.compile(
R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
lowerCAmelCase_ : Union[str, Any] = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
lowerCAmelCase_ : Any = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
lowerCAmelCase_ : Dict = re.compile(
R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
lowerCAmelCase_ : str = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A__ ):
lowerCAmelCase_ : Dict = re_encoder_block_conv_in.match(A__ )
lowerCAmelCase_ : Optional[int] = regex_match.groups()
lowerCAmelCase_ : Tuple = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase_ : Optional[Any] = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
lowerCAmelCase_ : List[str] = re_encoder_block_conv_in.sub(A__ , A__ )
elif re_encoder_block_resnet.fullmatch(A__ ):
lowerCAmelCase_ : Tuple = re_encoder_block_resnet.match(A__ )
lowerCAmelCase_ : Tuple = regex_match.groups()
lowerCAmelCase_ : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
lowerCAmelCase_ : str = {"""1""": 1, """3""": 2}[groups[-2]]
lowerCAmelCase_ : int = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
lowerCAmelCase_ : Optional[Any] = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowerCAmelCase_ : List[str] = prefix + resnet_block
lowerCAmelCase_ : Tuple = re_encoder_block_resnet.sub(A__ , A__ )
elif re_encoder_block_proj_out.fullmatch(A__ ):
lowerCAmelCase_ : int = re_encoder_block_proj_out.match(A__ )
lowerCAmelCase_ : Tuple = regex_match.groups()
lowerCAmelCase_ : Optional[Any] = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
lowerCAmelCase_ : str = re_encoder_block_proj_out.sub(A__ , A__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A__ ):
lowerCAmelCase_ : List[Any] = re_decoder_block_conv_out.match(A__ )
lowerCAmelCase_ : Tuple = regex_match.groups()
lowerCAmelCase_ : str = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase_ : Any = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
lowerCAmelCase_ : Tuple = re_decoder_block_conv_out.sub(A__ , A__ )
elif re_decoder_block_resnet.fullmatch(A__ ):
lowerCAmelCase_ : Optional[Any] = re_decoder_block_resnet.match(A__ )
lowerCAmelCase_ : Optional[Any] = regex_match.groups()
lowerCAmelCase_ : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
lowerCAmelCase_ : Any = {"""1""": 1, """3""": 2}[groups[-2]]
lowerCAmelCase_ : Optional[int] = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
lowerCAmelCase_ : Union[str, Any] = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowerCAmelCase_ : Dict = prefix + resnet_block
lowerCAmelCase_ : Any = re_decoder_block_resnet.sub(A__ , A__ )
elif re_decoder_block_proj_in.fullmatch(A__ ):
lowerCAmelCase_ : str = re_decoder_block_proj_in.match(A__ )
lowerCAmelCase_ : Optional[int] = regex_match.groups()
lowerCAmelCase_ : str = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
lowerCAmelCase_ : Union[str, Any] = re_decoder_block_proj_in.sub(A__ , A__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A__ ):
lowerCAmelCase_ : Optional[Any] = re_prior_cond_conv_out.match(A__ )
lowerCAmelCase_ : Union[str, Any] = regex_match.groups()
lowerCAmelCase_ : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase_ : Optional[int] = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
lowerCAmelCase_ : Dict = re_prior_cond_conv_out.sub(A__ , A__ )
elif re_prior_cond_resnet.fullmatch(A__ ):
lowerCAmelCase_ : Any = re_prior_cond_resnet.match(A__ )
lowerCAmelCase_ : int = regex_match.groups()
lowerCAmelCase_ : Dict = int(groups[1] ) * 2 + int(groups[2] ) - 2
lowerCAmelCase_ : Tuple = {"""1""": 1, """3""": 2}[groups[-2]]
lowerCAmelCase_ : Optional[Any] = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
lowerCAmelCase_ : List[Any] = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
lowerCAmelCase_ : Optional[int] = prefix + resnet_block
lowerCAmelCase_ : Dict = re_prior_cond_resnet.sub(A__ , A__ )
elif re_prior_cond_proj_in.fullmatch(A__ ):
lowerCAmelCase_ : List[str] = re_prior_cond_proj_in.match(A__ )
lowerCAmelCase_ : Optional[Any] = regex_match.groups()
lowerCAmelCase_ : Any = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
lowerCAmelCase_ : List[str] = re_prior_cond_proj_in.sub(A__ , A__ )
# keep original key
else:
lowerCAmelCase_ : Optional[Any] = original_key
lowerCAmelCase_ : Optional[Any] = replace_key(A__ )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
lowerCAmelCase_ : Dict = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
lowerCAmelCase_ : str = original_key
lowerCAmelCase_ : Dict = original_key
lowerCAmelCase_ : Optional[int] = value
return new_dict
@torch.no_grad()
def UpperCamelCase_ ( A__ : Optional[Any]=None , A__ : str=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
lowerCAmelCase_ : List[Any] = requests.get(f'{PREFIX}{file}' , allow_redirects=A__ )
os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=A__ )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , """wb""" ).write(r.content )
lowerCAmelCase_ : Optional[int] = MODEL_MAPPING[model_name.split("""/""" )[-1]]
lowerCAmelCase_ : List[str] = JukeboxConfig.from_pretrained(A__ )
lowerCAmelCase_ : Dict = JukeboxModel(A__ )
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = {}
for i, dict_name in enumerate(A__ ):
lowerCAmelCase_ : List[Any] = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )["""model"""]
lowerCAmelCase_ : Dict = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
lowerCAmelCase_ : Optional[int] = old_dic[k]
elif k.endswith(""".w""" ):
lowerCAmelCase_ : Optional[int] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
lowerCAmelCase_ : Tuple = old_dic[k]
else:
lowerCAmelCase_ : List[Any] = old_dic[k]
lowerCAmelCase_ : Union[str, Any] = """vqvae""" if i == 0 else f'priors.{3 - i}'
lowerCAmelCase_ : str = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ )
weight_dict.append(A__ )
lowerCAmelCase_ : int = weight_dict.pop(0 )
model.vqvae.load_state_dict(A__ )
for i in range(len(A__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A__ ).mkdir(exist_ok=A__ )
with open(f'{pytorch_dump_folder_path}/mapping.json' , """w""" ) as txtfile:
json.dump(A__ , A__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(A__ )
return weight_dict
if __name__ == "__main__":
__A : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
__A : Optional[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
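# Example invocation (editor-added; the script filename is an assumption):
#   python convert_jukebox.py \
#       --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted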
| 89 | 0 |
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
def update_area_of_max_square(snake_case , snake_case ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__SCREAMING_SNAKE_CASE : List[Any] = update_area_of_max_square(lowerCAmelCase__ , col + 1 )
__SCREAMING_SNAKE_CASE : Optional[int] = update_area_of_max_square(row + 1 , col + 1 )
__SCREAMING_SNAKE_CASE : Dict = update_area_of_max_square(row + 1 , lowerCAmelCase__ )
if mat[row][col]:
__SCREAMING_SNAKE_CASE : Any = 1 + min([right, diagonal, down] )
__SCREAMING_SNAKE_CASE : List[Any] = max(largest_square_area[0] , lowerCAmelCase__ )
return sub_problem_sol
else:
return 0
__SCREAMING_SNAKE_CASE : str = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
snake_case , snake_case , snake_case ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__SCREAMING_SNAKE_CASE : Optional[Any] = update_area_of_max_square_using_dp_array(lowerCAmelCase__ , col + 1 , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = update_area_of_max_square_using_dp_array(row + 1 , lowerCAmelCase__ , lowerCAmelCase__ )
if mat[row][col]:
__SCREAMING_SNAKE_CASE : int = 1 + min([right, diagonal, down] )
__SCREAMING_SNAKE_CASE : Optional[int] = max(largest_square_area[0] , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = sub_problem_sol
return sub_problem_sol
else:
return 0
__SCREAMING_SNAKE_CASE : Any = [0]
__SCREAMING_SNAKE_CASE : Optional[Any] = [[-1] * cols for _ in range(lowerCAmelCase__ )]
update_area_of_max_square_using_dp_array(0 , 0 , lowerCAmelCase__ )
return largest_square_area[0]
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = [[0] * (cols + 1) for _ in range(rows + 1 )]
__SCREAMING_SNAKE_CASE : str = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__SCREAMING_SNAKE_CASE : Any = dp_array[row][col + 1]
__SCREAMING_SNAKE_CASE : List[str] = dp_array[row + 1][col + 1]
__SCREAMING_SNAKE_CASE : List[str] = dp_array[row + 1][col]
if mat[row][col] == 1:
__SCREAMING_SNAKE_CASE : Dict = 1 + min(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = max(dp_array[row][col] , lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : List[Any] = 0
return largest_square_area
def a__ ( snake_case , snake_case , snake_case ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = [0] * (cols + 1)
__SCREAMING_SNAKE_CASE : Tuple = [0] * (cols + 1)
__SCREAMING_SNAKE_CASE : Optional[int] = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
__SCREAMING_SNAKE_CASE : Optional[Any] = current_row[col + 1]
__SCREAMING_SNAKE_CASE : Optional[int] = next_row[col + 1]
__SCREAMING_SNAKE_CASE : List[str] = next_row[col]
if mat[row][col] == 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1 + min(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = max(current_row[col] , lowerCAmelCase__ )
else:
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : str = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
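    # Editor-added sanity note: for the 2x2 all-ones matrix above, every
    # variant should report a largest square of side 2. The first three
    # names below are assumptions (the defs above are obfuscated); only
    # `largest_square_area_in_matrix_bottom_up` is used verbatim here.
    #
    #   mat = [[1, 1], [1, 1]]
    #   largest_square_area_in_matrix_top_down(2, 2, mat)                      # 2
    #   largest_square_area_in_matrix_top_down_with_dp(2, 2, mat)              # 2
    #   largest_square_area_in_matrix_bottom_up(2, 2, mat)                     # 2
    #   largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, mat)  # 2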
| 303 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowercase ( lowerCAmelCase__ : Namespace ) -> Tuple:
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
lowercase_ = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def __UpperCAmelCase ( _a ):
__a = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_a , required=_a , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=_a , required=_a , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_a , required=_a , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=_a , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=_a , default=_a , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_a )
def __init__( self , _a , _a , _a , _a , _a , *_a , ):
__a = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(f'''Loading model {model_type}''' )
__a = model_type
__a = tf_checkpoint
__a = pytorch_dump_output
__a = config
__a = finetuning_task_name
def __UpperCAmelCase ( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_a )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
if "ckpt" in self._tf_checkpoint.lower():
__a = self._tf_checkpoint
__a = ''''''
else:
__a = self._tf_checkpoint
__a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_a , self._config , self._pytorch_dump_output , _a )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(_a )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_a )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]''' )
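# Example invocation (editor-added; checkpoint and config paths are
# illustrative):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin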
| 45 | 0 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
_A = {
"b0": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_24,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 12_80,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_40,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 14_08,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_60,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 15_36,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_00,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 17_92,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_80,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 20_48,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_56,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 23_04,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_28,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 25_60,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_00,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def lowercase_ ( A__ ) -> int:
"""simple docstring"""
snake_case = EfficientNetConfig()
snake_case = CONFIG_MAP[model_name]["hidden_dim"]
snake_case = CONFIG_MAP[model_name]["width_coef"]
snake_case = CONFIG_MAP[model_name]["depth_coef"]
snake_case = CONFIG_MAP[model_name]["image_size"]
snake_case = CONFIG_MAP[model_name]["dropout_rate"]
snake_case = CONFIG_MAP[model_name]["dw_padding"]
snake_case = "huggingface/label-files"
snake_case = "imagenet-1k-id2label.json"
snake_case = 1000
snake_case = json.load(open(hf_hub_download(A__ , A__ , repo_type="dataset" ) , "r" ) )
    snake_case = {int(k): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( ) -> str:
"""simple docstring"""
snake_case = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case = Image.open(requests.get(A__ , stream=A__ ).raw )
return im
def lowercase_ ( A__ ) -> Tuple:
"""simple docstring"""
snake_case = CONFIG_MAP[model_name]["image_size"]
snake_case = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=A__ , )
return preprocessor
def lowercase_ ( A__ ) -> Optional[int]:
"""simple docstring"""
snake_case = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
snake_case = sorted(set(A__ ) )
snake_case = len(A__ )
    snake_case = {b: str(i) for b, i in zip(A__ , range(A__ ) )}
snake_case = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
snake_case = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
snake_case = {}
for item in rename_keys:
if item[0] in original_param_names:
snake_case = "efficientnet." + item[1]
snake_case = "classifier.weight"
snake_case = "classifier.bias"
return key_mapping
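# Editor-added reference: a couple of entries the mapping above yields,
# assuming the corresponding TF names appear in `original_param_names`:
#   "stem_conv/kernel:0"          -> "efficientnet.embeddings.convolution.weight"
#   "block1a_se_reduce/kernel:0"  -> "efficientnet.encoder.blocks.0.squeeze_excite.reduce.weight"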
def lowercase_ ( A__ , A__ , A__ ) -> Optional[Any]:
"""simple docstring"""
for key, value in tf_params.items():
if "normalization" in key:
continue
snake_case = key_mapping[key]
if "_conv" in key and "kernel" in key:
snake_case = torch.from_numpy(A__ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
snake_case = torch.from_numpy(A__ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
snake_case = torch.from_numpy(np.transpose(A__ ) )
else:
snake_case = torch.from_numpy(A__ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(A__ )
@torch.no_grad()
def lowercase_ ( A__ , A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
snake_case = model_classes[model_name](
include_top=A__ , weights="imagenet" , input_tensor=A__ , input_shape=A__ , pooling=A__ , classes=1000 , classifier_activation="softmax" , )
snake_case = original_model.trainable_variables
snake_case = original_model.non_trainable_variables
snake_case = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
snake_case = param.numpy()
snake_case = list(tf_params.keys() )
# Load HuggingFace model
snake_case = get_efficientnet_config(A__ )
snake_case = EfficientNetForImageClassification(A__ ).eval()
snake_case = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
snake_case = rename_keys(A__ )
replace_params(A__ , A__ , A__ )
# Initialize preprocessor and preprocess input image
snake_case = convert_image_processor(A__ )
snake_case = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
snake_case = hf_model(**A__ )
snake_case = outputs.logits.detach().numpy()
# Original model inference
snake_case = False
snake_case = CONFIG_MAP[model_name]["image_size"]
snake_case = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
snake_case = image.img_to_array(A__ )
snake_case = np.expand_dims(A__ , axis=0 )
snake_case = original_model.predict(A__ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(A__ , A__ , atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(A__ ):
os.mkdir(A__ )
# Save converted model and image processor
hf_model.save_pretrained(A__ )
preprocessor.save_pretrained(A__ )
if push_to_hub:
# Push model and image processor to hub
print(F'Pushing converted {model_name} to the hub...' )
snake_case = F'efficientnet-{model_name}'
preprocessor.push_to_hub(A__ )
hf_model.push_to_hub(A__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
_A = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
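# Example invocation (editor-added; the script filename is an assumption):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model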
| 137 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_A = logging.getLogger(__name__)
_A = "pytorch_model.bin"
@dataclasses.dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
UpperCAmelCase__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "A csv or a json file containing the validation data."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default=A_ , metadata={"help": "The name of the task to train on."} , )
UpperCAmelCase__ : Optional[List[str]] = dataclasses.field(
default=A_ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
UpperCAmelCase__ : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
UpperCAmelCase__ : Optional[bool] = dataclasses.field(
default=A_ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
UpperCAmelCase__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
    UpperCAmelCase__ : Optional[int] = dataclasses.field(
        default=1_00 , metadata={"help": "Maximum number of self-training iterations to run."} , )
UpperCAmelCase__ : Optional[int] = dataclasses.field(
default=A_ , metadata={"help": "Random seed for initialization."} , )
def lowercase_ ( A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
snake_case = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
snake_case = dataset.filter(lambda A__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
snake_case = int(eval_result * len(A__ ) )
print(A__ )
snake_case = dataset.sort("probability" , reverse=A__ )
snake_case = dataset.select(range(A__ ) )
snake_case = dataset.remove_columns(["label", "probability"] )
snake_case = dataset.rename_column("prediction" , "label" )
snake_case = dataset.map(lambda A__ : {"label": idalabel[example["label"]]} )
snake_case = dataset.shuffle(seed=args.seed )
snake_case = os.path.join(A__ , F'train_pseudo.{args.data_file_extension}' )
if args.data_file_extension == "csv":
dataset.to_csv(A__ , index=A__ )
else:
dataset.to_json(A__ )
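# Editor-added sketch of the confidence filter used above, assuming a
# `datasets.Dataset` with "prediction" and "probability" columns:
#
#   from datasets import Dataset
#   demo = Dataset.from_dict({"prediction": [0, 1, 1],
#                             "probability": [0.55, 0.92, 0.71]})
#   demo = demo.filter(lambda ex: ex["probability"] > 0.8)  # keeps only row 1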
def lowercase_ ( A__ , A__ , A__ , A__ , **A__ ) -> List[Any]:
"""simple docstring"""
snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
snake_case = STModelArguments(model_name_or_path=A__ )
snake_case = STDataArguments(train_file=A__ , infer_file=A__ )
snake_case = STTrainingArguments(output_dir=A__ )
snake_case = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A__ ).items():
setattr(A__ , A__ , A__ )
for key, value in kwargs.items():
if hasattr(A__ , A__ ):
setattr(A__ , A__ , A__ )
# Sanity checks
snake_case = {}
snake_case = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
snake_case = args.train_file
snake_case = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
snake_case = args.eval_file
for key in data_files:
snake_case = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F'`{key}_file` should be a csv or a json file.'
if args.data_file_extension is None:
snake_case = extension
else:
assert extension == args.data_file_extension, F'`{key}_file` should be a {args.data_file_extension} file`.'
assert (
args.eval_metric in datasets.list_metrics()
), F'{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
snake_case = F'{args.output_dir}/self-train_iter-{{}}'.format
snake_case = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A__ )
os.makedirs(A__ , exist_ok=A__ )
accelerator.wait_for_everyone()
snake_case = None
snake_case = None
snake_case = 0
snake_case = False
# Show the progress bar
snake_case = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
snake_case = data_dir_format(A__ )
assert os.path.exists(A__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
snake_case = os.path.join(A__ , "stage-1" )
snake_case = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A__ , A__ ):
arguments_dict.update({key: value} )
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 1." , A__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
snake_case = os.path.join(A__ , "best-checkpoint" )
snake_case = os.path.join(A__ , "stage-2" )
# Update arguments_dict
snake_case = model_path
snake_case = data_files["train"]
snake_case = current_output_dir
snake_case = os.path.join(A__ , "best-checkpoint" , A__ )
if os.path.exists(A__ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , A__ , A__ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , A__ )
finetune(**A__ )
accelerator.wait_for_everyone()
assert os.path.exists(A__ )
logger.info("Self-training job completed: iteration: %d, stage: 2." , A__ )
snake_case = iteration
snake_case = data_dir_format(iteration + 1 )
snake_case = AutoConfig.from_pretrained(os.path.join(A__ , "best-checkpoint" ) )
snake_case = config.idalabel
snake_case = os.path.join(A__ , "eval_results_best-checkpoint.json" )
snake_case = os.path.join(A__ , "test_results_best-checkpoint.json" )
assert os.path.exists(A__ )
with open(A__ , "r" ) as f:
snake_case = float(json.load(A__ )[args.eval_metric] )
snake_case = os.path.join(A__ , "infer_output_best-checkpoint.csv" )
assert os.path.exists(A__ )
# Loading the dataset from local csv or json files.
snake_case = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
snake_case = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(A__ , exist_ok=A__ )
shutil.copy(A__ , os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) )
if os.path.exists(A__ ):
shutil.copy(A__ , os.path.join(A__ , F'test_results_iter-{iteration}.json' ) )
create_pseudo_labeled_data(A__ , A__ , A__ , A__ , A__ , A__ )
accelerator.wait_for_everyone()
snake_case = os.path.join(A__ , F'train_pseudo.{args.data_file_extension}' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
snake_case = eval_result
if best_iteration is None:
snake_case = new_iteration
snake_case = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
snake_case = new_iteration
snake_case = new_eval_result
snake_case = 0
else:
if new_eval_result == best_eval_result:
snake_case = new_iteration
snake_case = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
snake_case = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , A__ )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{iteration}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , A__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A__ , F'eval_results_iter-{args.max_selftrain_iterations - 1}.json' ) , os.path.join(A__ , "eval_results_best-iteration.json" ) , )
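# Example call (editor-added; the entry-point name `selftrain` and all file
# paths are assumptions -- the def above is obfuscated). The first four
# positional arguments match the model/data/training dataclasses built at the
# top of the function:
#
#   selftrain(
#       "bert-base-uncased",   # model_name_or_path
#       "data/train.csv",      # train_file
#       "data/infer.csv",      # infer_file
#       "self-train-output",   # output_dir
#       eval_file="data/eval.csv",
#       evaluation_strategy="steps",
#   )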
| 137 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
A =yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
A ={
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A ={
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
A ='\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A =(
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
A ='\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A =(
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
A ='\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A ='The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A ='The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
A ='The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
A ='The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
A ='The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A ='The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
A ='The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A ='The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
A =''
A ='The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
A ='\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
A ='The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ (_a : str , _a : str ):
assert ReadMe.from_string(_a , _a ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ (_a : Any , _a : List[str] ):
with pytest.raises(_a , match=re.escape(expected_error.format(path='''root''' ) ) ):
UpperCAmelCase = ReadMe.from_string(_a , _a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ (_a : List[Any] , _a : Dict ):
with pytest.raises(_a , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(_a , _a )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ (_a : Optional[int] ):
ReadMe.from_string(_a , _a , suppress_parsing_errors=_a )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def snake_case_ (_a : Any , _a : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = Path(_a ) / '''README.md'''
with open(_a , '''w+''' ) as readme_file:
readme_file.write(_a )
UpperCAmelCase = ReadMe.from_readme(_a , _a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def snake_case_ (_a : Tuple , _a : List[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = Path(_a ) / '''README.md'''
with open(_a , '''w+''' ) as readme_file:
readme_file.write(_a )
UpperCAmelCase = expected_error.format(path=_a )
with pytest.raises(_a , match=re.escape(_a ) ):
UpperCAmelCase = ReadMe.from_readme(_a , _a )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ (_a : List[Any] , _a : List[str] ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = Path(_a ) / '''README.md'''
with open(_a , '''w+''' ) as readme_file:
readme_file.write(_a )
UpperCAmelCase = expected_error.format(path=_a )
with pytest.raises(_a , match=re.escape(_a ) ):
ReadMe.from_readme(_a , _a )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def snake_case_ (_a : int ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = Path(_a ) / '''README.md'''
with open(_a , '''w+''' ) as readme_file:
readme_file.write(_a )
ReadMe.from_readme(_a , _a , suppress_parsing_errors=_a )
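# Editor-added usage reference mirroring the tests above. `readme_md` stands
# for any of the README strings defined at the top of this file and
# `structure` for the yaml.safe_load(...) result (both are bound to
# obfuscated names here):
#
#   readme = ReadMe.from_string(readme_md, structure)
#   readme.to_dict()    # nested {"name", "text", "is_empty_text", "subsections"}
#   readme.validate()   # raises ValueError describing any problems found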
| 34 | """simple docstring"""
import os
import numpy
import onnx
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = a.name
UpperCAmelCase__ = b.name
UpperCAmelCase__ = ''
UpperCAmelCase__ = ''
UpperCAmelCase__ = a == b
UpperCAmelCase__ = name_a
UpperCAmelCase__ = name_b
return res
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase , lowerCamelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase , lowerCamelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase , lowerCamelCase )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        # remove the duplicate initializer and rewire every node that used it
        model_without_ext.graph.initializer.remove(inits[i])
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """Removes duplicate initializers from an ONNX model and saves an optimized copy next to it."""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # FLOAT
                    mem_size *= 4
                elif dtype == 6:  # INT32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # INT64 or DOUBLE
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)

    return new_model_path
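# Minimal usage sketch (illustrative; "model.onnx" is a placeholder path to an
# exported ONNX file on disk):
#
#   optimized_path = remove_dup_initializers("model.onnx")
#   print("optimized model written to", optimized_path)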
| 98 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.stages.{i}.{j}.gamma", f"backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.weight", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.depthwise_conv.bias", f"backbone.encoder.stages.{i}.layers.{j}.dwconv.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.weight", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.norm.bias", f"backbone.encoder.stages.{i}.layers.{j}.layernorm.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv1.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.weight", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight") )
rename_keys.append((f"backbone.stages.{i}.{j}.pointwise_conv2.bias", f"backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias") )
if i > 0:
rename_keys.append((f"backbone.downsample_layers.{i}.0.weight", f"backbone.encoder.stages.{i}.downsampling_layer.0.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.0.bias", f"backbone.encoder.stages.{i}.downsampling_layer.0.bias") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.weight", f"backbone.encoder.stages.{i}.downsampling_layer.1.weight") )
rename_keys.append((f"backbone.downsample_layers.{i}.1.bias", f"backbone.encoder.stages.{i}.downsampling_layer.1.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-convnext-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth",
        "upernet-convnext-small": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth",
        "upernet-convnext-base": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth",
        "upernet-convnext-large": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth",
        "upernet-convnext-xlarge": "https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)

    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]]
        )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]]
        )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]]
        )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f"upernet-convnext-{size}" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
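# Example invocation (the script filename below is illustrative):
#   python convert_upernet_checkpoint.py --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny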
| 344 |
import pytest
UpperCamelCase_ = "__dummy_dataset1__"
UpperCamelCase_ = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def dataset_loading_script_name() -> str:
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code() -> str:
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 344 | 1 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--base_model""", action="""store_true""", help="""Whether you want just the base model (no decoder) or not."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 14 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 169 | 0 |
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci numbers not exceeding n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix * x = vector by Gaussian elimination."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and right-hand side into an augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return a polynomial p with p(x) = y_list[x - 1] for x = 1..len(y_list)."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating polynomial u(n) from Project Euler problem 101."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials fitted to func."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 322 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
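# Usage sketch (downloads tokenizer files from the Hub on first use):
#   tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
#   tokenizer("a query to embed")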
| 89 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/skew an image with the affine transform mapping pts1 onto pts2."""
    matrix = cva.getAffineTransform(pts1, pts2)
    return cva.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list (point pairings are one reasonable choice)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 117 | 0 |
"""simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
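# Note: this client assumes a matching file-sending server is already listening
# on socket.gethostname():12312; without one, connect() raises ConnectionRefusedError.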
| 353 | """simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
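# Minimal usage sketch (assumes a running SparkSession; values are illustrative):
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.master("local[*]").getOrCreate()
#   df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#   ds = SparkDatasetReader(df, streaming=False).read()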
| 126 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
        return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
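# Usage sketch (illustrative hyperparameters):
#   config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   assert config.d_model == 64  # architecture defaults are kept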
| 2 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses should override this with the actual algorithm
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F'maximum flow is {maximum_flow}')
| 31 | '''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law: p = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law: V = nRT / p."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
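# Quick sanity checks (illustrative SI values):
#   pressure_of_gas_system(2, 100, 5)      -> ~332.58  (Pa)
#   volume_of_gas_system(0.5, 273, 0.004)  -> ~283731  (m^3)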
if __name__ == "__main__":
from doctest import testmod
testmod()
| 31 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 356 |
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
def lowercase ( self: Dict , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: dict , **_SCREAMING_SNAKE_CASE: Union[str, Any] ) -> np.ndarray:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , "torch" )
_check_torch_version()
# convert to torch
UpperCamelCase_ = to_channel_dimension_format(_SCREAMING_SNAKE_CASE , ChannelDimension.FIRST )
UpperCamelCase_ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ , UpperCamelCase_ = patch_size["height"], patch_size["width"]
UpperCamelCase_ , UpperCamelCase_ = get_image_size(_SCREAMING_SNAKE_CASE )
# maximize scale s.t.
UpperCamelCase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase_ = max(min(math.floor(scale * image_height / patch_height ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(min(math.floor(scale * image_width / patch_width ) , _SCREAMING_SNAKE_CASE ) , 1 )
UpperCamelCase_ = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase_ = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="bilinear" , align_corners=_SCREAMING_SNAKE_CASE , antialias=_SCREAMING_SNAKE_CASE , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = torch_extract_patches(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = patches.shape
UpperCamelCase_ = patches_shape[1]
UpperCamelCase_ = patches_shape[2]
UpperCamelCase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([rows, 1] ).repeat(1 , _SCREAMING_SNAKE_CASE ).reshape([rows * columns, 1] )
UpperCamelCase_ = torch.arange(_SCREAMING_SNAKE_CASE ).reshape([1, columns] ).repeat(_SCREAMING_SNAKE_CASE , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase_ = torch.nn.functional.pad(_SCREAMING_SNAKE_CASE , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase_ = to_numpy_array(_SCREAMING_SNAKE_CASE )
return result
def lowercase ( self: Optional[Any] , _SCREAMING_SNAKE_CASE: np.ndarray , _SCREAMING_SNAKE_CASE: Optional[Union[str, ChannelDimension]] = None , **_SCREAMING_SNAKE_CASE: List[str] ) -> np.ndarray:
"""simple docstring"""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
# take mean across the whole `image`
UpperCamelCase_ = np.mean(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = np.std(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = max(_SCREAMING_SNAKE_CASE , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowercase ( self: Optional[int] , _SCREAMING_SNAKE_CASE: ImageInput , _SCREAMING_SNAKE_CASE: Optional[str] = None , _SCREAMING_SNAKE_CASE: bool = None , _SCREAMING_SNAKE_CASE: Optional[bool] = None , _SCREAMING_SNAKE_CASE: Optional[int] = None , _SCREAMING_SNAKE_CASE: Optional[Dict[str, int]] = None , _SCREAMING_SNAKE_CASE: Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE: ChannelDimension = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE: List[Any] , ) -> ImageInput:
"""simple docstring"""
UpperCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase_ = patch_size if patch_size is not None else self.patch_size
UpperCamelCase_ = max_patches if max_patches is not None else self.max_patches
UpperCamelCase_ = self.is_vqa
if kwargs.get("data_format" , _SCREAMING_SNAKE_CASE ) is not None:
raise ValueError("data_format is not an accepted input as the outputs are " )
UpperCamelCase_ = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase_ = [convert_to_rgb(_SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase_ = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("A header text must be provided for VQA models." )
UpperCamelCase_ = kwargs.pop("font_bytes" , _SCREAMING_SNAKE_CASE )
UpperCamelCase_ = kwargs.pop("font_path" , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase_ = [header_text] * len(_SCREAMING_SNAKE_CASE )
UpperCamelCase_ = [
render_header(_SCREAMING_SNAKE_CASE , header_text[i] , font_bytes=_SCREAMING_SNAKE_CASE , font_path=_SCREAMING_SNAKE_CASE )
for i, image in enumerate(_SCREAMING_SNAKE_CASE )
]
if do_normalize:
UpperCamelCase_ = [self.normalize(image=_SCREAMING_SNAKE_CASE ) for image in images]
# convert to torch tensor and permute
UpperCamelCase_ = [
self.extract_flattened_patches(image=_SCREAMING_SNAKE_CASE , max_patches=_SCREAMING_SNAKE_CASE , patch_size=_SCREAMING_SNAKE_CASE )
for image in images
]
# create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
UpperCamelCase_ = BatchFeature(
data={"flattened_patches": images, "attention_mask": attention_masks} , tensor_type=_SCREAMING_SNAKE_CASE )
return encoded_outputs
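
# Illustrative note (an editorial addition): each row produced by
# `extract_flattened_patches` is [row_id, col_id, patch pixels], zero-padded up
# to `max_patches` rows. A sketch of the resulting shape, assuming RGB input:
def _demo_flattened_patch_shape(max_patches=2048, patch_height=16, patch_width=16, num_channels=3):
    depth = 2 + patch_height * patch_width * num_channels  # 2 positional ids + pixels
    return (max_patches, depth)  # e.g. (2048, 770) for the defaults above
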
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_multiply_mod(a: int, b: int, c: int) -> int:
    """Multiply a by b modulo c, keeping intermediate sums reduced mod c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
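
# Quick sanity checks (an editorial addition): double-and-add reproduces
# ordinary multiplication, with and without a modulus.
if __name__ == "__main__":
    assert binary_multiply(13, 11) == 143
    assert binary_multiply_mod(13, 11, 7) == (13 * 11) % 7
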
import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin style) primality test with prec random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
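
# Worked example (an editorial addition): the decomposition used above. For
# n = 561, n - 1 = 560 = 35 * 2**4, so the test starts from a**35 mod 561 and
# squares the result up to 4 times while looking for n - 1.
def _decompose(n: int) -> tuple:
    d, exp = n - 1, 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    return d, exp  # _decompose(561) == (35, 4)
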
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas one generation by applying the rules to every point."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
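
# Illustrative check (an editorial addition): a live cell with exactly two
# live neighbours survives. The focus cell sits at the centre of the 3x3 block.
def _demo_rules() -> bool:
    neighbourhood = [
        [True, False, False],
        [False, True, False],
        [False, False, True],
    ]
    return __judge_point(True, neighbourhood)  # True: 2 live neighbours -> survives
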
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001
) -> float:
    """Square root approximated with Newton's method: x_(k+1) = x_k - f(x_k) / f'(x_k)."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
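
# Doctest-style example (an editorial addition): the iteration converges to
# the library square root well within the default tolerance.
def _demo_square_root() -> float:
    value = square_root_iterative(2.0)
    assert abs(value - math.sqrt(2.0)) < 1e-9
    return value
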
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def _UpperCAmelCase ( a__ , a__):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
a_ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(a__)
a_ , a_ : Tuple = XLMProphetNetForConditionalGeneration.from_pretrained(
a__ , output_loading_info=a__)
else:
a_ : int = ProphetNetForConditionalGenerationOld.from_pretrained(a__)
a_ , a_ : Union[str, Any] = ProphetNetForConditionalGeneration.from_pretrained(
a__ , output_loading_info=a__)
a_ : int = ["""key_proj""", """value_proj""", """query_proj"""]
a_ : Union[str, Any] = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
a_ : List[str] = key.split(""".""")
if attributes[0] == "lm_head":
a_ : Optional[Any] = prophet
a_ : List[str] = prophet_old
else:
a_ : List[Any] = prophet.prophetnet
a_ : Tuple = prophet_old.model
a_ : str = False
for attribute in attributes:
if attribute in mapping:
a_ : Dict = mapping[attribute]
if not hasattr(a__ , a__) and len(a__) > 0:
a_ : Optional[int] = attribute
elif hasattr(a__ , a__):
a_ : Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
a_ : int = old_model.weight
logger.info(f'''{attribute} is initialized.''')
a_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
a_ : Tuple = old_model.bias
logger.info(f'''{attribute} is initialized''')
a_ : int = True
break
elif attribute in special_keys and hasattr(a__ , """in_proj_weight"""):
a_ : str = old_model.in_proj_weight.shape[0] // 3
a_ : List[Any] = getattr(a__ , a__)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
a_ : Optional[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
a_ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim])
elif attribute == "key_proj":
a_ : List[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
a_ : Tuple = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
elif attribute == "value_proj":
a_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
a_ : List[str] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
a_ : List[str] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
a_ : Tuple = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :])
a_ : Tuple = True
break
if attribute.isdigit():
a_ : int = model[int(a__)]
a_ : str = old_model[int(a__)]
else:
a_ : Any = getattr(a__ , a__)
if old_attribute == "":
a_ : str = old_model
else:
if not hasattr(a__ , a__):
raise ValueError(f'''{old_model} does not have {old_attribute}''')
a_ : str = getattr(a__ , a__)
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''')
print(f'''Saving model to {pytorch_dump_folder_path}''')
prophet.save_pretrained(a__)
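
# Illustrative sketch (an editorial addition): how a fused in_proj weight of
# shape (3 * embed_dim, embed_dim) splits into query/key/value, mirroring the
# special-key handling above. The input tensor is a hypothetical stand-in.
def _demo_split_in_proj(in_proj_weight):
    embed_dim = in_proj_weight.shape[0] // 3
    query = in_proj_weight[:embed_dim, :]
    key = in_proj_weight[embed_dim : 2 * embed_dim, :]
    value = in_proj_weight[2 * embed_dim :, :]
    return query, key, value
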
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case : Any = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__snake_case : Dict = """<<<<<<< This should probably be modified because it mentions: """
__snake_case : Any = """=======
>>>>>>>
"""
__snake_case : Any = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
__snake_case : Dict = [
# (pattern, replacement)
# Order is important here for some replacements
(r"""tfds\.core""", r"""datasets"""),
(r"""tf\.io\.gfile\.GFile""", r"""open"""),
(r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""),
(r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""),
(r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""),
(r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""),
(r"""tfds\.features\.FeaturesDict\(""", r"""dict("""),
(r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(r"""tfds\.""", r"""datasets."""),
(r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""),
(r"""self\.builder_config""", r"""self.config"""),
]
def _UpperCAmelCase ( a__):
'''simple docstring'''
return ConvertCommand(args.tfds_path , args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( _lowercase ) -> Dict:
a_ : Optional[Any] = parser.add_parser(
"""convert""" , help="""Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.""" , )
train_parser.add_argument(
"""--tfds_path""" , type=_lowercase , required=_lowercase , help="""Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.""" , )
train_parser.add_argument(
"""--datasets_directory""" , type=_lowercase , required=_lowercase , help="""Path to the HuggingFace Datasets folder.""" )
train_parser.set_defaults(func=_lowercase )
def __init__( self , _lowercase , _lowercase , *_lowercase ) -> str:
a_ : List[Any] = get_logger("""datasets-cli/converting""" )
a_ : Optional[Any] = tfds_path
a_ : List[Any] = datasets_directory
def UpperCamelCase__ ( self ) -> Dict:
if os.path.isdir(self._tfds_path ):
a_ : List[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
a_ : Dict = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
a_ : List[Any] = os.path.abspath(self._datasets_directory )
self._logger.info(F'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''' )
a_ : Dict = []
a_ : Tuple = []
a_ : str = {}
if os.path.isdir(self._tfds_path ):
a_ : str = os.listdir(_lowercase )
else:
a_ : int = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F'''Looking at file {f_name}''' )
a_ : List[str] = os.path.join(_lowercase , _lowercase )
a_ : Dict = os.path.join(_lowercase , _lowercase )
if not os.path.isfile(_lowercase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(_lowercase , encoding="""utf-8""" ) as f:
a_ : Any = f.readlines()
a_ : Any = []
a_ : str = False
a_ : List[str] = False
a_ : List[Any] = []
for line in lines:
a_ : Union[str, Any] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
a_ : List[Any] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
a_ : Optional[int] = """"""
continue
elif "from absl import logging" in out_line:
a_ : List[str] = """from datasets import logging\n"""
elif "getLogger" in out_line:
a_ : List[str] = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
a_ : Dict = True
                    a_ : Optional[Any] = list(filter(lambda e: e in out_line , TO_HIGHLIGHT ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_lowercase ) + """\n""" )
out_lines.append(_lowercase )
out_lines.append(_lowercase )
continue
else:
for pattern, replacement in TO_CONVERT:
a_ : List[str] = re.sub(_lowercase , _lowercase , _lowercase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , out_line )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
a_ : Optional[int] = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F'''Error converting {out_line.strip()}''' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
a_ : Optional[Any] = True
out_lines.append(_lowercase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
a_ : List[str] = f_name.replace(""".py""" , """""" )
a_ : Optional[int] = os.path.join(_lowercase , _lowercase )
a_ : Dict = os.path.join(_lowercase , _lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
self._logger.info(F'''Adding directory {output_dir}''' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_lowercase )
if needs_manual_update:
with_manual_update.append(_lowercase )
with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
f.writelines(_lowercase )
self._logger.info(F'''Converted in {output_file}''' )
for utils_file in utils_files:
try:
a_ : Optional[int] = os.path.basename(_lowercase )
a_ : List[Any] = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(F'''Moving {dest_folder} to {utils_file}''' )
shutil.copy(_lowercase , _lowercase )
except KeyError:
self._logger.error(F'''Cannot find destination folder for {utils_file}. Please copy manually.''' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''' )
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: List[Any] = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
_lowercase: Optional[Any] = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to transformers PP-only names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
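
# Sanity examples (an editorial addition): byte sizes are inferred from the
# trailing digits of the dtype name; torch.bool is special-cased to 1/8 so
# shard sizes can be tracked in (possibly fractional) bytes.
def _demo_dtype_sizes() -> None:
    assert get_dtype_size(torch.float32) == 4
    assert get_dtype_size(torch.float16) == 2
    assert get_dtype_size(torch.int8) == 1
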
def a( A : Optional[int] , A : Any , A : Any , A : str , A : str ) -> Any:
"""simple docstring"""
if bloom_config_file == "":
a = BloomConfig()
else:
a = BloomConfig.from_json_file(A )
if shard_model:
a = os.listdir(A )
a = sorted(filter(lambda A : s.startswith("layer" ) and "model_00" in s , A ) )
a = {"weight_map": {}, "metadata": {}}
a = 0
a = None
a = BloomConfig()
for j, file in enumerate(A ):
print("Processing file: {}".format(A ) )
a = None
for i in range(A ):
# load all TP files
a = file.replace("model_00" , f'''model_0{i}''' )
a = torch.load(os.path.join(A , A ) , map_location="cpu" )
# Rename keys in the transformers names
a = list(temp.keys() )
for key in keys:
a = temp.pop(A )
if tensors is None:
a = temp
else:
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
a = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
a = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a = tensors[key] / pretraining_tp
torch.save(
A , os.path.join(
A , "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
a = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
a = "pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ) , str(len(A ) ).zfill(5 ) )
a = BloomConfig()
a = pytorch_dump_folder_path + "/" + CONFIG_NAME
a = total_size
with open(A , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(A , WEIGHTS_NAME + ".index.json" ) , "w" , encoding="utf-8" ) as f:
a = json.dumps(A , indent=2 , sort_keys=A ) + "\n"
f.write(A )
else:
a = BloomModel(A )
a = os.listdir(A )
a = sorted(filter(lambda A : s.startswith("layer" ) and "model_00" in s , A ) )
a = None
for i, file in enumerate(A ):
a = None
for i in range(A ):
# load all TP files
a = file.replace("model_00" , f'''model_0{i}''' )
a = torch.load(os.path.join(A , A ) , map_location="cpu" )
# Rename keys in the transformers names
a = list(temp.keys() )
for key in keys:
a = temp.pop(A )
if tensors is None:
a = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
a = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
a = torch.cat([tensors[key], temp[key]] , dim=A )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(A ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
a = tensors[key] / pretraining_tp
a = model.load_state_dict(A , strict=A )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
a = set(other_keys.missing_keys )
else:
a = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(A , exist_ok=A )
a = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
a = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
a = model.to(config.torch_dtype )
torch.save(model.state_dict() , A )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(A , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
_lowercase: Optional[int] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: Dict = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
def __call__(self , lowerCamelCase_ ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCamelCase_ )
[x.remove() for x in self.handles]
return self
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return list(filter(lambda lowerCamelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True
def __call__(self , lowerCamelCase_ ):
"""simple docstring"""
a = Tracker(self.dest )(lowerCamelCase_ ).parametrized
a = Tracker(self.src )(lowerCamelCase_ ).parametrized
a = list(filter(lambda lowerCamelCase_ : type(lowerCamelCase_ ) not in self.src_skip , lowerCamelCase_ ) )
a = list(filter(lambda lowerCamelCase_ : type(lowerCamelCase_ ) not in self.dest_skip , lowerCamelCase_ ) )
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCamelCase_ )} operations while'''
F''' destination module has {len(lowerCamelCase_ )}.''' )
for dest_m, src_m in zip(lowerCamelCase_ , lowerCamelCase_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy_vision RegNet trunk so its feature blocks can be traced."""
def __init__(self , lowerCamelCase_ ):
"""simple docstring"""
super().__init__()
a = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F'''Unexpected layer name {k}'''
a = len(lowerCamelCase_ ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
a = nn.ModuleDict(lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
return get_trunk_forward_outputs(
lowerCamelCase_ , out_feat_keys=lowerCamelCase_ , feature_blocks=self._feature_blocks , )
class NameToFromModelFuncMap(dict):
    """Maps a model name to a function returning (original model, state_dict or None), with a timm fallback."""
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__(self , lowerCamelCase_ ):
"""simple docstring"""
if x not in self:
a = self.convert_name_to_timm(lowerCamelCase_ )
a = partial(lambda: (timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ ).eval(), None) )
else:
a = super().__getitem__(lowerCamelCase_ )
return val
class NameToOurModelFuncMap(dict):
    """Maps a model name to the matching transformers model class."""
def __getitem__(self , lowerCamelCase_ ):
"""simple docstring"""
if "seer" in x and "in1k" not in x:
a = RegNetModel
else:
a = RegNetForImageClassification
return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys: List[Tuple[str, str]]):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
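
# Tiny example (an editorial addition): copying the SEER classification head
# into transformers naming. The tensors are hypothetical stand-ins.
def _demo_manual_head_copy():
    from_sd = {"0.clf.0.weight": torch.ones(2, 2), "0.clf.0.bias": torch.zeros(2)}
    keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
    return manually_copy_vissl_head(from_sd, {}, keys)
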
def a( A : str , A : Callable[[], nn.Module] , A : Callable[[], nn.Module] , A : RegNetConfig , A : Path , A : bool = True , ) -> List[str]:
"""simple docstring"""
print(f'''Converting {name}...''' )
with torch.no_grad():
a , a = from_model_func()
a = our_model_func(A ).eval()
a = ModuleTransfer(src=A , dest=A , raise_if_mismatch=A )
a = torch.randn((1, 3, 224, 224) )
module_transfer(A )
if from_state_dict is not None:
a = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
a = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
a = manually_copy_vissl_head(A , our_model.state_dict() , A )
our_model.load_state_dict(A )
a = our_model(A , output_hidden_states=A )
a = (
our_outputs.logits if isinstance(A , A ) else our_outputs.last_hidden_state
)
a = from_model(A )
a = from_output[-1] if type(A ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
a = our_outputs.hidden_states[-1]
assert torch.allclose(A , A ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=A , )
a = 224 if "seer" not in name else 384
# we can use the convnext one
a = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=A )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=A , )
print(f'''Pushed {name}''' )
def a( A : Path , A : str = None , A : bool = True ) -> Dict:
"""simple docstring"""
a = "imagenet-1k-id2label.json"
a = 1000
a = (1, num_labels)
a = "huggingface/label-files"
a = num_labels
a = json.load(open(cached_download(hf_hub_url(A , A , repo_type="dataset" ) ) , "r" ) )
a = {int(A ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
a = partial(A , num_labels=A , idalabel=A , labelaid=A )
a = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
a = NameToOurModelFuncMap()
a = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(A : str , A : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
a = torch.hub.load_state_dict_from_url(A , model_dir=str(A ) , map_location="cpu" )
a = model_func()
# check if we have a head, if yes add it
a = files["classy_state_dict"]["base_model"]["model"]
a = model_state_dict["trunk"]
model.load_state_dict(A )
return model.eval(), model_state_dict["heads"]
# pretrained
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
a = partial(
A , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , A , A , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
A , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , A , A , A , )
return config, expected_shape
if __name__ == "__main__":
_lowercase: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
_lowercase: Optional[int] = parser.parse_args()
_lowercase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_snake_case = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
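
# Minimal example (an editorial addition): quantizing two pixels against a
# hypothetical two-colour palette; each pixel maps to its nearest cluster index.
def _demo_color_quantize():
    clusters = np.array([[0.0, 0.0, 0.0], [255.0, 255.0, 255.0]])  # black, white
    pixels = np.array([[[10.0, 10.0, 10.0], [250.0, 250.0, 250.0]]])
    return color_quantize(pixels, clusters)  # -> array([0, 1])
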
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = True , _UpperCamelCase = True , **_UpperCamelCase , ):
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE )
_lowercase : Any = size if size is not None else {"""height""": 256, """width""": 256}
_lowercase : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE )
_lowercase : Any = np.array(_SCREAMING_SNAKE_CASE ) if clusters is not None else None
_lowercase : Optional[Any] = do_resize
_lowercase : str = size
_lowercase : Optional[Any] = resample
_lowercase : Optional[int] = do_normalize
_lowercase : List[str] = do_color_quantize
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : Union[str, Any] = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
_SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , ):
"""simple docstring"""
_lowercase : List[Any] = rescale(image=_SCREAMING_SNAKE_CASE , scale=1 / 1_2_7.5 , data_format=_SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = image - 1
return image
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
"""simple docstring"""
_lowercase : int = do_resize if do_resize is not None else self.do_resize
_lowercase : List[str] = size if size is not None else self.size
_lowercase : Tuple = get_size_dict(_SCREAMING_SNAKE_CASE )
_lowercase : List[Any] = resample if resample is not None else self.resample
_lowercase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_lowercase : List[Any] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
_lowercase : Optional[int] = clusters if clusters is not None else self.clusters
_lowercase : Any = np.array(_SCREAMING_SNAKE_CASE )
_lowercase : int = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
_lowercase : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
_lowercase : List[Any] = [self.resize(image=_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , resample=_SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
_lowercase : Union[str, Any] = [self.normalize(image=_SCREAMING_SNAKE_CASE ) for image in images]
if do_color_quantize:
_lowercase : List[Any] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
_lowercase : Dict = np.array(_SCREAMING_SNAKE_CASE )
_lowercase : int = color_quantize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
_lowercase : int = images.shape[0]
_lowercase : List[Any] = images.reshape(_SCREAMING_SNAKE_CASE , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
_lowercase : List[str] = list(_SCREAMING_SNAKE_CASE )
else:
_lowercase : List[str] = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
_lowercase : Optional[Any] = {"""input_ids""": images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
A: Tuple = logging.get_logger(__name__)
A: List[Any] = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , _SCREAMING_SNAKE_CASE=50265 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ) -> str:
'''simple docstring'''
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : Any = max_position_embeddings
UpperCAmelCase : str = d_model
UpperCAmelCase : List[Any] = encoder_ffn_dim
UpperCAmelCase : Union[str, Any] = encoder_layers
UpperCAmelCase : List[str] = encoder_attention_heads
UpperCAmelCase : Optional[Any] = decoder_ffn_dim
UpperCAmelCase : List[Any] = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : Union[str, Any] = dropout
UpperCAmelCase : Union[str, Any] = attention_dropout
UpperCAmelCase : int = activation_dropout
UpperCAmelCase : int = activation_function
UpperCAmelCase : Optional[Any] = init_std
UpperCAmelCase : List[str] = encoder_layerdrop
UpperCAmelCase : List[str] = decoder_layerdrop
UpperCAmelCase : List[str] = use_cache
UpperCAmelCase : Union[str, Any] = encoder_layers
UpperCAmelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , forced_eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
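
# Illustrative usage (an editorial addition): the defaults mirror the 90M
# checkpoint, and `attribute_map` lets `hidden_size` alias `d_model`.
def _demo_config():
    config = BlenderbotSmallConfig()
    assert config.hidden_size == config.d_model == 512
    return config
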
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase : int = {0: """batch"""}
UpperCAmelCase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCAmelCase : Optional[Any] = {0: """batch""", 1: """decoder_sequence"""}
UpperCAmelCase : Any = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCAmelCase : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
UpperCAmelCase , UpperCAmelCase : Tuple = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCAmelCase : Dict = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Union[str, Any] = super().outputs
else:
UpperCAmelCase : int = super(_SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
UpperCAmelCase , UpperCAmelCase : Any = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCAmelCase : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Generate decoder inputs
UpperCAmelCase : Optional[Any] = seq_length if not self.use_past else 1
UpperCAmelCase : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCAmelCase : Union[str, Any] = dict(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase , UpperCAmelCase : int = common_inputs["""input_ids"""].shape
UpperCAmelCase : Any = common_inputs["""decoder_input_ids"""].shape[1]
UpperCAmelCase , UpperCAmelCase : str = self.num_attention_heads
UpperCAmelCase : Union[str, Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : Union[str, Any] = decoder_seq_length + 3
UpperCAmelCase : Union[str, Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCAmelCase : Optional[Any] = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCAmelCase : List[str] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCAmelCase , UpperCAmelCase : List[Any] = self.num_layers
UpperCAmelCase : Optional[Any] = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - min_num_layers
UpperCAmelCase : Optional[int] = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(_SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
UpperCAmelCase : Dict = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
UpperCAmelCase , UpperCAmelCase : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase : Union[str, Any] = seqlen + 2
UpperCAmelCase , UpperCAmelCase : Optional[int] = self.num_layers
UpperCAmelCase , UpperCAmelCase : Dict = self.num_attention_heads
UpperCAmelCase : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCAmelCase : List[Any] = common_inputs["""attention_mask"""].dtype
UpperCAmelCase : Any = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 )
UpperCAmelCase : List[str] = [
(torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(_SCREAMING_SNAKE_CASE )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
UpperCAmelCase : str = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : List[str] = tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : Dict = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCAmelCase : Optional[int] = dict(tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
UpperCAmelCase : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
return common_inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
UpperCAmelCase : List[Any] = super()._flatten_past_key_values_(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : Dict = super(_SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 109 | 0 |
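A minimal sketch of the idea behind the dummy `past_key_values` built in the ONNX config above, isolated from the class; the layer count and tensor sizes below are assumptions chosen purely for illustration.

import torch

# Assumed sizes for illustration only.
batch, num_heads, past_len, head_dim = 2, 4, 10, 16
decoder_shape = (batch, num_heads, past_len, head_dim)
encoder_shape = (batch, num_heads, past_len, head_dim)

# One 4-tuple per layer: decoder self-attention key/value plus
# encoder-decoder cross-attention key/value, all zero-initialized.
past_key_values = [
    (
        torch.zeros(decoder_shape),
        torch.zeros(decoder_shape),
        torch.zeros(encoder_shape),
        torch.zeros(encoder_shape),
    )
    for _ in range(3)  # assumed min(num_encoder_layers, num_decoder_layers)
]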
"""simple docstring"""
import unittest
from transformers import DonutProcessor
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''naver-clova-ix/donut-base'''
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = DonutProcessor.from_pretrained(lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
_lowerCamelCase = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
_lowerCamelCase = self.processor.tokenajson(lowerCamelCase__ )
self.assertDictEqual(lowerCamelCase__ , lowerCamelCase__ )
| 73 |
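To make the test above easier to follow, here is a toy, regex-based sketch of what a token2json-style parser does for flat fields only; it is not the transformers implementation, and the real DonutProcessor method additionally handles nesting and <sep/>-separated lists.

import re

def toy_token2json(sequence: str) -> dict:
    # Flat "<s_key>value</s_key>" pairs become dictionary entries.
    return {
        key: value
        for key, value in re.findall(r"<s_(\w+)>(.*?)</s_\1>", sequence)
    }

print(toy_token2json("<s_name>John Doe</s_name><s_age>99</s_age>"))
# {'name': 'John Doe', 'age': '99'}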
"""simple docstring"""
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    # Create a fresh temp directory and return a unique file path inside it.
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
_lowerCamelCase = AgentAudio(lowerCamelCase__ )
_lowerCamelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
_lowerCamelCase , _lowerCamelCase = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , torch.tensor(lowerCamelCase__ ) , atol=1e-4 ) )
def snake_case__ ( self ):
_lowerCamelCase = torch.rand(1_2 , dtype=torch.floataa ) - 0.5
_lowerCamelCase = get_new_path(suffix='''.wav''' )
sf.write(lowerCamelCase__ , lowerCamelCase__ , 1_6_0_0_0 )
_lowerCamelCase = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , lowerCamelCase__ )
@require_vision
@require_torch
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = torch.randint(0 , 2_5_6 , (6_4, 6_4, 3) )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
_lowerCamelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_lowerCamelCase = Image.open(lowerCamelCase__ )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def snake_case__ ( self ):
_lowerCamelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
_lowerCamelCase = Image.open(lowerCamelCase__ )
_lowerCamelCase = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
_lowerCamelCase = '''Hey!'''
_lowerCamelCase = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , agent_type.to_string() )
self.assertEqual(lowerCamelCase__ , agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 73 | 1 |
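A minimal sketch (not transformers code) of the lazy-serialization pattern the tests above exercise: the agent type keeps the raw value in memory and only writes a temp file when a string path is requested, and that file outlives the Python object.

import os
import tempfile
import uuid

class ToyAgentText:
    def __init__(self, value: str):
        self._value = value
        self._path = None  # written lazily by to_string()

    def to_raw(self) -> str:
        return self._value

    def to_string(self) -> str:
        if self._path is None:
            self._path = os.path.join(tempfile.mkdtemp(), str(uuid.uuid4()) + ".txt")
            with open(self._path, "w") as f:
                f.write(self._value)
        return self._path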
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger('''transformers.models.speecht5''')
__UpperCamelCase = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__UpperCamelCase = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__UpperCamelCase = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__UpperCamelCase = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__UpperCamelCase = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__UpperCamelCase = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__UpperCamelCase = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__UpperCamelCase = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__UpperCamelCase = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__UpperCamelCase = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__UpperCamelCase = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__UpperCamelCase = []
__UpperCamelCase = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__UpperCamelCase = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__UpperCamelCase = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__UpperCamelCase = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
for attribute in key.split('.' ):
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase )
if weight_type is not None:
snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
else:
snake_case_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
snake_case_ = value
elif weight_type == "weight_g":
snake_case_ = value
elif weight_type == "weight_v":
snake_case_ = value
elif weight_type == "bias":
snake_case_ = value
elif weight_type == "running_mean":
snake_case_ = value
elif weight_type == "running_var":
snake_case_ = value
elif weight_type == "num_batches_tracked":
snake_case_ = value
else:
snake_case_ = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
snake_case_ , snake_case_ = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = []
if task == "s2t":
snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder
snake_case_ = MAPPING_S2T
snake_case_ = IGNORE_KEYS_S2T
elif task == "t2s":
snake_case_ = None
snake_case_ = MAPPING_T2S
snake_case_ = IGNORE_KEYS_T2S
elif task == "s2s":
snake_case_ = hf_model.speechta.encoder.prenet.feature_encoder
snake_case_ = MAPPING_S2S
snake_case_ = IGNORE_KEYS_S2S
else:
raise ValueError(f'Unsupported task: {task}' )
for name, value in fairseq_dict.items():
if should_ignore(UpperCAmelCase , UpperCAmelCase ):
logger.info(f'{name} was ignored' )
continue
snake_case_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , )
snake_case_ = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
snake_case_ , snake_case_ = key.split('.*.' )
if prefix in name and suffix in name:
snake_case_ = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
snake_case_ = True
if "*" in mapped_key:
snake_case_ = name.split(UpperCAmelCase )[0].split('.' )[-2]
snake_case_ = mapped_key.replace('*' , UpperCAmelCase )
if "weight_g" in name:
snake_case_ = 'weight_g'
elif "weight_v" in name:
snake_case_ = 'weight_v'
elif "bias" in name:
snake_case_ = 'bias'
elif "weight" in name:
snake_case_ = 'weight'
elif "running_mean" in name:
snake_case_ = 'running_mean'
elif "running_var" in name:
snake_case_ = 'running_var'
elif "num_batches_tracked" in name:
snake_case_ = 'num_batches_tracked'
else:
snake_case_ = None
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
continue
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = full_name.split('conv_layers.' )[-1]
snake_case_ = name.split('.' )
snake_case_ = int(items[0] )
snake_case_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
snake_case_ = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , ) -> Dict:
if config_path is not None:
snake_case_ = SpeechTaConfig.from_pretrained(UpperCAmelCase )
else:
snake_case_ = SpeechTaConfig()
if task == "s2t":
snake_case_ = config.max_text_positions
snake_case_ = SpeechTaForSpeechToText(UpperCAmelCase )
elif task == "t2s":
snake_case_ = 1876
snake_case_ = 600
snake_case_ = config.max_speech_positions
snake_case_ = SpeechTaForTextToSpeech(UpperCAmelCase )
elif task == "s2s":
snake_case_ = 1876
snake_case_ = config.max_speech_positions
snake_case_ = SpeechTaForSpeechToSpeech(UpperCAmelCase )
else:
raise ValueError(f'Unknown task name: {task}' )
if vocab_path:
snake_case_ = SpeechTaTokenizer(UpperCAmelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
snake_case_ = AddedToken('<mask>' , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase )
snake_case_ = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
snake_case_ = SpeechTaFeatureExtractor()
snake_case_ = SpeechTaProcessor(tokenizer=UpperCAmelCase , feature_extractor=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
snake_case_ = torch.load(UpperCAmelCase )
recursively_load_weights(fairseq_checkpoint['model'] , UpperCAmelCase , UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(UpperCAmelCase )
model.push_to_hub(UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__UpperCamelCase = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 69 |
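The converter above matches checkpoint key names against patterns containing "*" wildcards; a standalone sketch of that matching logic, mirroring the structure of the ignore-key check, looks like this.

def matches(pattern: str, name: str) -> bool:
    # "prefix.*" -> any key under that prefix
    if pattern.endswith(".*"):
        return name.startswith(pattern[:-1])
    # "prefix.*.suffix" -> layer-indexed keys such as "encoder.layers.3.fc1"
    if ".*." in pattern:
        prefix, suffix = pattern.split(".*.")
        return prefix in name and suffix in name
    # plain substring match otherwise
    return pattern in name

assert matches("encoder.layers.*.fc1", "encoder.layers.3.fc1")
assert matches("text_decoder_prenet.*", "text_decoder_prenet.embed_tokens")
assert not matches("decoder.version", "encoder.version")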
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309 | 0 |
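A minimal sketch of the delegation pattern these processor tests check: a processor owns a tokenizer and a feature extractor and routes each modality to the right component. The class and argument names here are hypothetical, not the transformers API.

class ToyProcessor:
    def __init__(self, tokenizer, feature_extractor):
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor

    def __call__(self, text=None, audios=None, **kwargs):
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios.")
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text, **kwargs))
        if audios is not None:
            outputs.update(self.feature_extractor(audios, **kwargs))
        return outputs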
def pancake_sort(arr: list) -> list:
    """Sort a list using pancake sort (prefix reversals only)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the first `cur` elements, flipping the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
| 265 |
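A worked trace of pancake_sort above: each pass first flips the current maximum to the front, then flips the first cur elements so the maximum lands at position cur - 1. The instrumented copy below prints the list after each pass.

def pancake_sort_trace(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[0:cur]))
        arr = arr[mi::-1] + arr[mi + 1 :]      # flip max to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]   # flip max into position cur - 1
        print(f"after pass with cur={cur}: {arr}")
        cur -= 1
    return arr

pancake_sort_trace([3, 1, 2])
# after pass with cur=3: [2, 1, 3]
# after pass with cur=2: [1, 2, 3]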
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_A : List[str] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
_UpperCAmelCase : Any = ["audio_values", "audio_mask"]
def __init__( self : Any , A : Union[str, Any]=2_0_4_8 , A : Any=1 , A : int=[1_6, 1_6] , A : Any=1_2_8 , A : List[Any]=4_4_1_0_0 , A : Dict=8_6 , A : Dict=2_0_4_8 , A : str=0.0 , **A : Union[str, Any] , ) ->List[Any]:
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , **A , )
lowerCamelCase__ : Dict = spectrogram_length
lowerCamelCase__ : Optional[int] = num_channels
lowerCamelCase__ : str = patch_size
lowerCamelCase__ : Any = feature_size // self.patch_size[1]
lowerCamelCase__ : Union[str, Any] = n_fft
lowerCamelCase__ : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
lowerCamelCase__ : Optional[Any] = sampling_rate
lowerCamelCase__ : Optional[Any] = padding_value
lowerCamelCase__ : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=A , norm='''slaney''' , mel_scale='''slaney''' , ).T
def __lowerCamelCase ( self : List[Any] , A : np.array ) ->np.ndarray:
lowerCamelCase__ : Any = spectrogram(
A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
lowerCamelCase__ : Any = log_spec[:, :-1]
lowerCamelCase__ : int = log_spec - 20.0
lowerCamelCase__ : int = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Union[str, Any] , A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A : Optional[Union[str, TensorType]] = None , A : Optional[bool] = True , A : Optional[int] = None , A : bool = False , A : bool = False , **A : str , ) ->BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCamelCase__ : List[str] = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
lowerCamelCase__ : Any = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase__ : Optional[int] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
lowerCamelCase__ : Optional[int] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase__ : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase__ : int = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCamelCase__ : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A ):
lowerCamelCase__ : Dict = [np.asarray(A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCamelCase__ : Optional[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCamelCase__ : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCamelCase__ : List[str] = np.array(A ).astype(np.floataa )
# convert into correct format for padding
lowerCamelCase__ : str = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCamelCase__ : Optional[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCamelCase__ : Tuple = padded_audio_features * self.padding_value
for i in range(len(A ) ):
lowerCamelCase__ : int = audio_features[i]
lowerCamelCase__ : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
lowerCamelCase__ : Optional[int] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
lowerCamelCase__ : Tuple = {'''audio_values''': padded_audio_features}
lowerCamelCase__ : Union[str, Any] = BatchFeature(data=A , tensor_type=A )
return encoded_inputs
| 265 | 1 |
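The feature extractor above normalizes its dB-scaled log-mel spectrogram in three steps; a tiny numpy sketch with stand-in values shows the effect (values from an 80 dB range are shifted, scaled, clipped, and mapped into [-1, 1]).

import numpy as np

log_spec = np.array([-80.0, -40.0, -20.0, 0.0])  # stand-in dB values
log_spec = log_spec - 20.0                        # shift by -20 dB
log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
print(log_spec)  # [-1.  -0.5  0.   0.5]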
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Union[str, Any] = 1
lowerCAmelCase_ :Any = 3
lowerCAmelCase_ :Tuple = (32, 32)
lowerCAmelCase_ :Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__A )
return image
@property
def __lowerCAmelCase ( self ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(__A )
@property
def __lowerCAmelCase ( self ) -> int:
def extract(*__A , **__A ):
class _SCREAMING_SNAKE_CASE :
def __init__( self ) -> str:
lowerCAmelCase_ :List[str] = torch.ones([0] )
def __lowerCAmelCase ( self , __A ) -> int:
self.pixel_values.to(__A )
return self
return Out()
return extract
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :Dict = self.dummy_cond_unet
lowerCAmelCase_ :List[Any] = PNDMScheduler(skip_prk_steps=__A )
lowerCAmelCase_ :int = self.dummy_vae
lowerCAmelCase_ :Union[str, Any] = self.dummy_text_encoder
lowerCAmelCase_ :Optional[int] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCAmelCase_ :Dict = 77
lowerCAmelCase_ :Tuple = self.dummy_image.to(__A )
lowerCAmelCase_ :Any = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ :Dict = AltDiffusionImgaImgPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ :Dict = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__A )
lowerCAmelCase_ :Any = alt_pipe.to(__A )
alt_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[Any] = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Optional[Any] = torch.Generator(device=__A ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = alt_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__A , )
lowerCAmelCase_ :Tuple = output.images
lowerCAmelCase_ :str = torch.Generator(device=__A ).manual_seed(0 )
lowerCAmelCase_ :str = alt_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__A , return_dict=__A , )[0]
lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase_ :List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ :str = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Tuple = self.dummy_cond_unet
lowerCAmelCase_ :int = PNDMScheduler(skip_prk_steps=__A )
lowerCAmelCase_ :int = self.dummy_vae
lowerCAmelCase_ :Dict = self.dummy_text_encoder
lowerCAmelCase_ :List[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCAmelCase_ :Optional[Any] = 77
lowerCAmelCase_ :Optional[int] = self.dummy_image.to(__A )
# put models in fp16
lowerCAmelCase_ :Any = unet.half()
lowerCAmelCase_ :Union[str, Any] = vae.half()
lowerCAmelCase_ :Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ :Tuple = AltDiffusionImgaImgPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
lowerCAmelCase_ :List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__A )
lowerCAmelCase_ :Union[str, Any] = alt_pipe.to(__A )
alt_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :int = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Dict = torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = alt_pipe(
[prompt] , generator=__A , num_inference_steps=2 , output_type="""np""" , image=__A , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase_ :Tuple = init_image.resize((760, 504) )
lowerCAmelCase_ :str = """BAAI/AltDiffusion"""
lowerCAmelCase_ :str = AltDiffusionImgaImgPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation"""
lowerCAmelCase_ :Dict = torch.manual_seed(0 )
lowerCAmelCase_ :str = pipe(
prompt=__A , image=__A , strength=0.7_5 , guidance_scale=7.5 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Dict = output.images[0]
lowerCAmelCase_ :List[str] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowerCAmelCase_ :int = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCAmelCase_ :Union[str, Any] = init_image.resize((768, 512) )
lowerCAmelCase_ :str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
lowerCAmelCase_ :Union[str, Any] = """BAAI/AltDiffusion"""
lowerCAmelCase_ :Any = AltDiffusionImgaImgPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowerCAmelCase_ :Tuple = """A fantasy landscape, trending on artstation"""
lowerCAmelCase_ :int = torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = pipe(
prompt=__A , image=__A , strength=0.7_5 , guidance_scale=7.5 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Optional[int] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 84 |
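A standalone sketch of the "image slice" regression pattern used throughout the diffusion tests above: instead of comparing a full generated image, compare a small fixed corner against hard-coded values recorded from a trusted run. The arrays below are stand-ins.

import numpy as np

image = np.zeros((1, 32, 32, 3))        # stand-in for pipeline output
image_slice = image[0, -3:, -3:, -1]    # 3x3 corner of the last channel
expected_slice = np.zeros(9)            # values recorded from a trusted run
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3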
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=14 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=0.0_2 , ):
'''simple docstring'''
__UpperCamelCase = parent
__UpperCamelCase = batch_size
__UpperCamelCase = seq_length
__UpperCamelCase = is_training
__UpperCamelCase = use_input_mask
__UpperCamelCase = use_token_type_ids
__UpperCamelCase = use_labels
__UpperCamelCase = vocab_size
__UpperCamelCase = hidden_size
__UpperCamelCase = rotary_dim
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_attention_heads
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_act
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = initializer_range
__UpperCamelCase = None
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
__UpperCamelCase = vocab_size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase = None
if self.use_input_mask:
__UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs
__UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , attention_mask=__UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = 20
__UpperCamelCase = model_class_name(__UpperCAmelCase )
__UpperCamelCase = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__UpperCamelCase = model.init_cache(input_ids.shape[0] , __UpperCAmelCase )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__UpperCamelCase = model(
input_ids[:, :-1] , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__UpperCAmelCase , position_ids=__UpperCAmelCase , )
__UpperCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'Max diff is {diff}' )
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
lowercase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowercase = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = FlaxGPTJModelTester(self )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__UpperCamelCase = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=__UpperCAmelCase , truncation=__UpperCAmelCase )
__UpperCamelCase = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = False
__UpperCamelCase = model.config.eos_token_id
__UpperCamelCase = jax.jit(model.generate )
__UpperCamelCase = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__UpperCamelCase = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
__UpperCamelCase = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
__UpperCamelCase = fx_state
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = model_class.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
__UpperCamelCase = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__UpperCamelCase = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__UpperCamelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
__UpperCamelCase = getattr(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = pt_model_class(__UpperCAmelCase ).eval()
__UpperCamelCase = model_class(__UpperCAmelCase , dtype=jnp.floataa )
__UpperCamelCase = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
__UpperCamelCase , __UpperCamelCase = pt_inputs['input_ids'].shape
__UpperCamelCase = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(__UpperCAmelCase ):
__UpperCamelCase = 0
__UpperCamelCase = 1
__UpperCamelCase = 0
__UpperCamelCase = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__UpperCamelCase = pt_model(**__UpperCAmelCase ).to_tuple()
__UpperCamelCase = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
__UpperCamelCase = pt_model_class.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
with torch.no_grad():
__UpperCamelCase = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(
len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def UpperCAmelCase ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCAmelCase )
| 316 | 0 |
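The PyTorch/Flax cross-tests above all reduce to one numeric check; a framework-free numpy sketch of it, with stand-in logits:

import numpy as np

logits_a = np.random.rand(2, 7, 5).astype(np.float32)  # stand-in model A outputs
logits_b = logits_a + 1e-5                              # stand-in model B outputs
diff = np.max(np.abs(logits_a[:, -1, :5] - logits_b[:, -1, :5]))
assert diff < 1e-3, f"Max diff is {diff}"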
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """
    Calculate the date of Easter for a given year using Gauss's algorithm.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1_994, 2_000, 2_010, 2_021, 2_023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 319 |
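A worked evaluation of gauss_easter above for year = 2023, step by step:

# metonic_cycle             = 2023 % 19                 = 9
# julian_leap_year          = 2023 % 4                  = 3
# non_leap_year             = 2023 % 7                  = 0
# leap_day_inhibits         = floor(2023 / 100)         = 20
# lunar_orbit_correction    = floor((13 + 8 * 20) / 25) = 6
# leap_day_reinstall_number = 20 / 4                    = 5.0
# secular_moon_shift        = (15 - 6 + 20 - 5.0) % 30  = 24.0
# century_starting_point    = (4 + 20 - 5.0) % 7        = 5.0
# days_to_add               = (19 * 9 + 24.0) % 30      = 15.0
# days_from_phm_to_sunday   = (6 + 0 + 90 + 5.0) % 7    = 3.0
# result: March 22 + int(15.0 + 3.0) days = April 9, 2023 (the actual Easter date)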
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Recursive bubble sort: after each full pass the largest remaining
    element is in place, so recurse on a prefix that is one element shorter.

    >>> bubble_sort([27, 33, 28, 4, 2, 26, 13, 35, 8, 14])
    [2, 4, 8, 13, 14, 26, 27, 28, 33, 35]
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 319 | 1 |