"""simple docstring"""
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """
    Convert a string representation of truth to 1 (true) or 0 (false), raising ValueError for anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
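
# Illustrative usage (these examples are not in the original module): `strtobool`
# mirrors the behavior of the deprecated `distutils.util.strtobool`.
#
#     >>> strtobool("YES")
#     1
#     >>> strtobool("off")
#     0
#     >>> strtobool("maybe")
#     Traceback (most recent call last):
#         ...
#     ValueError: invalid truth value 'maybe'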


def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
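
# Illustrative usage (not in the original module): both converters recurse through
# dicts and lists/tuples, and accept tensors from any installed framework.
#
#     >>> to_py_obj({"ids": np.array([[1, 2], [3, 4]])})
#     {'ids': [[1, 2], [3, 4]]}
#     >>> to_numpy([1.0, 2.0]).shape
#     (2,)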


class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows you to index by integer or slice
    (like a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a
    regular python dictionary.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyError if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
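
# Illustrative subclass (not in the original module): a ModelOutput is declared as a
# dataclass whose fields all default to None; it can then be indexed by key or by
# position, and `None` fields are skipped.
#
#     >>> from dataclasses import dataclass
#     >>> from typing import Optional
#     >>> @dataclass
#     ... class ToyOutput(ModelOutput):
#     ...     logits: Optional[int] = None
#     ...     loss: Optional[int] = None
#     >>> out = ToyOutput(logits=3)
#     >>> out["logits"], out[0], out.to_tuple()
#     (3, 3, (3,))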


class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument in tokenizer methods. Useful for tab-completion in an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument. Useful for tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
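
# Illustrative usage (not in the original module): enter several context managers
# through a single `with` block; all of them are exited together.
#
#     >>> with ContextManagers([open("a.txt", "w"), open("b.txt", "w")]):
#     ...     pass  # both files are open here and closed on exit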


def can_return_loss(model_class):
    """
    Check if a given model can return loss.

    Args:
        model_class (`type`): The class of the model.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def lowercase_ ( _snake_case ,_snake_case = "" ,_snake_case = "." ):
def _flatten_dict(_snake_case ,_snake_case="" ,_snake_case="." ):
for k, v in d.items():
SCREAMING_SNAKE_CASE__ : str = str(_snake_case ) + delimiter + str(_snake_case ) if parent_key else k
if v and isinstance(_snake_case ,_snake_case ):
yield from flatten_dict(_snake_case ,_snake_case ,delimiter=_snake_case ).items()
else:
yield key, v
return dict(_flatten_dict(_snake_case ,_snake_case ,_snake_case ) )
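
# Illustrative usage (not in the original module):
#
#     >>> flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     {'a': 1, 'b.c': 2, 'b.d.e': 3}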


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that works for `numpy.ndarray`, `torch.Tensor`, `tf.Tensor` and
    `jnp.ndarray`.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape`.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze`.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims`.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size`.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
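
# Illustrative usage (not in the original module): the same helpers work on numpy
# arrays and, when the corresponding framework is installed, on torch/tf/jax tensors.
#
#     >>> x = np.zeros((2, 3, 1))
#     >>> transpose(x).shape
#     (1, 3, 2)
#     >>> squeeze(x, axis=-1).shape
#     (2, 3)
#     >>> tensor_size(x)
#     6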


def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_snake_case ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_snake_case )
return parser.parse_args()
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : int = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE__ : int = script_fpath.stem
SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case )
# Patch sys.argv
SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
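
# Illustrative invocation (the script name and arguments below are hypothetical);
# the launched script must expose a `_mp_fn(index)` entry point for `xmp.spawn`:
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5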
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : Optional[Any] = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ['BeitFeatureExtractor']
UpperCAmelCase__ : Any = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
return 1 if input_a == input_a else 0
def lowercase_ ( ):
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
UpperCAmelCase__ : Tuple = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

        # Check attention is always last and order is fine
        inputs_dict["output_attentions"] = True
        inputs_dict["output_hidden_states"] = True
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        self.assertEqual(out_len + 2, len(outputs))

        self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

        self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
        self.assertListEqual(
            list(self_attentions[0].shape[-3:]),
            [self.model_tester.num_attention_heads, encoder_seq_length, dim],
        )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def lowercase_ ( _snake_case="train-batch.pt" ):
SCREAMING_SNAKE_CASE__ : List[str] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" ,filename=_snake_case ,repo_type="""dataset""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.load(_snake_case ,map_location=_snake_case )
return batch
@require_torch
@slow
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = prepare_batch()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : int = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
SCREAMING_SNAKE_CASE__ : int = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=SCREAMING_SNAKE_CASE__ ) )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , SCREAMING_SNAKE_CASE__ , rtol=1E-1 ) )
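
# To run this file inside a `transformers` checkout (illustrative command; the exact
# test path depends on the repository layout, and RUN_SLOW=1 is needed for the @slow
# integration tests):
#
#     RUN_SLOW=1 python -m pytest tests/models/autoformer/test_modeling_autoformer.py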
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib


log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the `DATASETS_VERBOSITY` env var is set to one of the valid choices, return that as the new default level.
    Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
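
# Illustrative usage (not in the original module), assuming this module is exposed
# as `datasets.logging` as in the `datasets` library:
#
#     >>> import datasets
#     >>> datasets.logging.set_verbosity_info()
#     >>> logger = datasets.logging.get_logger(__name__)
#     >>> datasets.logging.disable_progress_bar()  # silence tqdm progress bars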
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
return int((input_a, input_a).count(0 ) != 0 )
def lowercase_ ( ):
assert nand_gate(0 ,0 ) == 1
assert nand_gate(0 ,1 ) == 1
assert nand_gate(1 ,0 ) == 1
assert nand_gate(1 ,1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a YOLOS model.
    """

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
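
# Illustrative usage (not in the original module): build a default configuration and
# override a detection-specific field.
#
#     >>> config = YolosConfig(num_detection_tokens=50)
#     >>> config.model_type, config.num_detection_tokens
#     ('yolos', 50)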
"""simple docstring"""
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return
    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation


def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on


def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
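
# Quick sanity sketch (not in the original script): each reverse_* helper undoes its
# forward counterpart, so a round trip is the identity.
#
#     >>> x = torch.arange(32.0).reshape(2, 16)
#     >>> torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)
#     True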


def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)
# verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
# assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]])
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]])
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]])
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]])
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
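# Example invocation (a sketch; the script filename is a placeholder):
#
#   python convert_upernet_checkpoint.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny --push_to_hub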
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
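# Worked example for the formula above, f = 1 / (2 * pi * sqrt(L * C)).
# With illustrative values L = 10 mH and C = 5 uF (not from the source):
# sqrt(L * C) = sqrt(5e-8) ~= 2.236e-4, so f ~= 1 / (2 * pi * 2.236e-4) ~= 711.8 Hz.
#
#   print(resonant_frequency(10e-3, 5e-6))  # -> ('Resonant frequency', 711.76...)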
"""simple docstring"""
import math
import unittest
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
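# Why stepping by 6 suffices (descriptive note): every integer is 6k + r with
# r in {0, 1, 2, 3, 4, 5}; r in {0, 2, 4} yields a multiple of 2 and r = 3 a
# multiple of 3, so any prime factor greater than 3 is of the form 6k +/- 1 --
# exactly the pair (i, i + 2) probed on each loop iteration above.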
class Test(unittest.TestCase):
"""simple docstring"""
    def test_primes(self):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self):
        """simple docstring"""
        with self.assertRaises(AssertionError):
            is_prime(-19)
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convbert'] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convbert'] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
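# Descriptive note on the pattern above (not original code): at import time the
# module object is swapped for a _LazyModule, so e.g.
#
#   from transformers.models.convbert import ConvBertModel
#
# only materializes the torch-backed submodule on first attribute access,
# keeping the top-level import cheap when optional backends are missing.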
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
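# Reference check (descriptive note): the merged sequence of 2/3/5-smooth
# numbers begins 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ..., so ugly_numbers(10) == 12.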
"""simple docstring"""
def matching_min_vertex_cover(graph):
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add its endpoints to chosen_vertices, and then
    # remove all arcs adjacent to from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph):
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
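# Illustrative walk-through on the commented graph above: popping edge (0, 1)
# first selects {0, 1} and discards every incident edge; one more popped edge
# among {2, 3, 4} finishes the cover -- the classic maximal-matching
# 2-approximation of minimum vertex cover (actual output depends on set order).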
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class ASTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "audio-spectrogram-transformer"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
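# Patch-count arithmetic implied by the defaults above (a sketch mirroring the
# AST setup; not part of the original file): with 128 mel bins, max_length 1024,
# patch_size 16 and strides of 10 on both axes, patches per axis are
# (dim - patch_size) // stride + 1:
#
#   frequency: (128 - 16) // 10 + 1 = 12
#   time:      (1024 - 16) // 10 + 1 = 101   -> 12 * 101 = 1212 patches total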
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
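# Descriptive note: for seq = [3, 1, 2, 4] the calls above print all
# 2**4 = 16 subsequences; the "skip element" branch recurses before the
# "keep element" branch, so [] is printed first and [3, 1, 2, 4] last.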
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
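# Example invocation (a sketch; file paths and the script name are placeholders):
#
#   python convert_flava_checkpoint.py --checkpoint_path flava.pt \
#       --codebook_path dalle_encoder.pt --pytorch_dump_folder_path ./flava-full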
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """simple docstring"""

    def __init__(self, params, data):
        """simple docstring"""
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__(self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__(self ) -> int:
"""simple docstring"""
return len(self.lengths )
    def check(self):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences(self):
        """simple docstring"""
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f"Splitting {sum(idx)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """simple docstring"""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
    def remove_unknown_sequences(self):
        """simple docstring"""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
    def print_statistics(self):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """simple docstring"""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
return tk_t, lg_t
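# Usage sketch (illustrative; `params` must expose max_model_input_size, mlm
# and special_tok_ids exactly as the original distillation code assumes):
#
#   from torch.utils.data import DataLoader
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)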
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'


def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
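# Example invocation (a sketch; the fairseq checkpoint directory and the
# script name are placeholders):
#
#   python convert_xlm_roberta_xl_checkpoint.py \
#       --roberta_checkpoint_path /path/to/xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl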
"""simple docstring"""
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
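# Descriptive note on the constants above: they are precomputed sampling-
# timestep schedules, each descending from 999 to 0 at a different granularity
# (27, 27, 50, 100, 185, 27, 40 and 100 entries). Schedules of exactly this
# shape ship with the DeepFloyd IF pipeline in diffusers (fast27 / smart27 /
# smart50 / smart100 / smart185 / super27 / super40 / super100), which is the
# assumed provenance here; the original variable names were not preserved.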
"""simple docstring"""
def circle_sort(collection):
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection, low, high) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
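# e.g. entering "7,3,5,1" prints [1, 3, 5, 7]: each outer pass swaps mirrored
# out-of-order pairs, recurses on both halves, and the driver loop stops once
# a full pass reports no swap (descriptive note, not part of the original file).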
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
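# Example invocation (a sketch; the script name and .pb path are placeholders):
#
#   python check_saved_model_ops.py --saved_model_path ./model/saved_model.pb --opset 12 --strict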
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
__UpperCamelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCamelCase : Optional[str] = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCamelCase : bool = field(default=a__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
__UpperCamelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
__UpperCamelCase : Optional[str] = field(
default=a__ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__UpperCamelCase : int = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__UpperCamelCase : bool = field(
default=a__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
    module = import_module("""tasks""" )
    try:
        token_classification_task_clazz = getattr(module ,model_args.task_type )
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1 ) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" ,_snake_case )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels )
    label_map: Dict[int, str] = dict(enumerate(labels ) )
    num_labels = len(labels )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=num_labels ,id2label=label_map ,label2id={label: i for i, label in enumerate(labels )} ,cache_dir=model_args.cache_dir ,)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast ,)
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=config ,cache_dir=model_args.cache_dir ,)
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task ,data_dir=data_args.data_dir ,tokenizer=tokenizer ,labels=labels ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task ,data_dir=data_args.data_dir ,tokenizer=tokenizer ,labels=labels ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray ,label_ids: np.ndarray ) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions ,axis=2 )
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size )]
        preds_list = [[] for _ in range(batch_size )]
        for i in range(batch_size ):
            for j in range(seq_len ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions ,p.label_ids )
        return {
            "accuracy_score": accuracy_score(out_label_list ,preds_list ),
            "precision": precision_score(out_label_list ,preds_list ),
            "recall": recall_score(out_label_list ,preds_list ),
            "f1": f1_score(out_label_list ,preds_list ),
        }
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer ,pad_to_multiple_of=8 ) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,compute_metrics=compute_metrics ,data_collator=data_collator ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir ,"""eval_results.txt""" )
        if trainer.is_world_process_zero():
            with open(output_eval_file ,"""w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""" ,key ,value )
                    writer.write("""%s = %s\n""" % (key, value) )
            results.update(result )
# Predict
if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task ,data_dir=data_args.data_dir ,tokenizer=tokenizer ,labels=labels ,model_type=config.model_type ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.test ,)
        predictions, label_ids, metrics = trainer.predict(test_dataset )
        preds_list, _ = align_predictions(predictions ,label_ids )
        output_test_results_file = os.path.join(training_args.output_dir ,"""test_results.txt""" )
        if trainer.is_world_process_zero():
            with open(output_test_results_file ,"""w""" ) as writer:
                for key, value in metrics.items():
                    logger.info(""" %s = %s""" ,key ,value )
                    writer.write("""%s = %s\n""" % (key, value) )
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir ,"""test_predictions.txt""" )
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file ,"""w""" ) as writer:
                with open(os.path.join(data_args.data_dir ,"""test.txt""" ) ,"""r""" ) as f:
                    token_classification_task.write_predictions_to_file(writer ,f ,preds_list )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
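# Example invocation (a sketch; dataset paths are placeholders -- a single
# JSON config file passed as argv[1] also works, see the parse_json_file
# branch above):
#
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --labels ./labels.txt --output_dir ./ner-out --do_train --do_eval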
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
"""simple docstring"""
    def run_eval_tester(self, model):
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'''
            run_eval.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
        with patch.object(sys, "argv", testargs):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
    def test_run_eval(self):
        """simple docstring"""
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
    def test_run_eval_slow(self, model):
        """simple docstring"""
        self.run_eval_tester(model)
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
    def test_run_eval_search(self, model):
        """simple docstring"""
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'''
run_eval_search.py
{model}
            {str(input_file_name)}
            {str(output_file_name)}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
        with patch.object(sys, "argv", testargs):
with CaptureStdout() as cs:
run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
            expected_strings.extend(ROUGE_KEYS)
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
        assert Path(score_path).exists()
        os.remove(Path(score_path))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
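# Usage sketch for the exported classes (standard composition API; the
# checkpoint names are illustrative):
#
#   from transformers import VisionEncoderDecoderModel
#   model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
#       "google/vit-base-patch16-224-in21k", "bert-base-uncased"
#   )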
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__UpperCamelCase : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__UpperCamelCase : Any = False
@property
def __magic_name__ (self ) -> Any:
"""simple docstring"""
return 32
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 32
@property
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
return self.time_input_dim
@property
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return 1_00
@property
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
SCREAMING_SNAKE_CASE__ : Any = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__ )
return model
@property
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_movq
SCREAMING_SNAKE_CASE__ : List[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE__ )
# create init_image
SCREAMING_SNAKE_CASE__ : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create mask
SCREAMING_SNAKE_CASE__ : str = np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = """cpu"""
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Any = output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) , return_dict=SCREAMING_SNAKE_CASE__ , )[0]
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.ones((7_68, 7_68) , dtype=np.floataa )
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : str = """a hat"""
SCREAMING_SNAKE_CASE__ : int = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = KandinskyVaaInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline.to(SCREAMING_SNAKE_CASE__ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = pipe_prior(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
SCREAMING_SNAKE_CASE__ : Tuple = pipeline(
image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , image_embeds=SCREAMING_SNAKE_CASE__ , negative_image_embeds=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif not isinstance(_snake_case ,int ):
raise TypeError("""Input value must be a 'int' type""" )
return bin(_snake_case ).count("""1""" )
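# Hedged usage sketch: the function counts the set bits of a non-negative integer.
#   lowercase_(25)  # bin(25) == '0b11001' -> 3
#   lowercase_(37)  # bin(37) == '0b100101' -> 3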
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
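# Hedged example: for "upernet-swin-tiny" this yields a Swin backbone with
# embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), window_size=7,
# and an UperNet head with 150 ADE20K labels.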
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
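# Hedged sanity sketch: each "correct_unfold_*_order" helper above and its
# "reverse_*" counterpart (the un-obfuscated names used at the call sites below)
# are mutual inverses, since the permutation [0, 2, 1, 3] is an involution.
# For a length-8 vector the 1-D pair round-trips exactly:
#   x = torch.arange(8.0)
#   y = x.reshape(4, 2)[[0, 2, 1, 3], :].transpose(0, 1).reshape(8)
#   z = y.reshape(2, 4)[:, [0, 2, 1, 3]].transpose(0, 1).reshape(8)
#   assert torch.equal(x, z)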
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
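# Hedged example invocation (the script filename is hypothetical):
#   python convert_swin_upernet_checkpoint.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny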
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
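# Hedged usage sketch (assumes the standard `pipeline` factory and the public
# "Intel/dpt-large" depth-estimation checkpoint):
#   from transformers import pipeline
#   estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   out["depth"].save("depth.png")  # PIL image; out["predicted_depth"] is a torch tensor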
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Any = filter(lambda _snake_case : p.requires_grad ,model.parameters() )
SCREAMING_SNAKE_CASE__ : Any = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ : int = logging.getLogger(__name__)
def lowercase_ ( _snake_case ,_snake_case ):
if metric == "rouge2":
SCREAMING_SNAKE_CASE__ : Any = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
SCREAMING_SNAKE_CASE__ : Tuple = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
SCREAMING_SNAKE_CASE__ : Tuple = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
""" function.""" )
SCREAMING_SNAKE_CASE__ : List[Any] = ModelCheckpoint(
dirpath=_snake_case ,filename=_snake_case ,monitor=f'''val_{metric}''' ,mode="""max""" ,save_top_k=3 ,every_n_epochs=1 ,)
return checkpoint_callback
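# Hedged usage sketch (`get_checkpoint_callback` is the un-obfuscated name of the
# helper above): build a callback that keeps the top-3 checkpoints by val_rouge2.
#   ckpt_cb = get_checkpoint_callback("./output", "rouge2")  # monitor="val_rouge2", mode="max"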
def lowercase_ ( _snake_case ,_snake_case ):
return EarlyStopping(
monitor=f'''val_{metric}''' ,mode="""min""" if """loss""" in metric else """max""" ,patience=_snake_case ,verbose=_snake_case ,)
class lowerCAmelCase_ (pl.Callback ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ) -> None:
"""simple docstring"""
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
SCREAMING_SNAKE_CASE__ : Tuple = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
SCREAMING_SNAKE_CASE__ : Any = Path(pl_module.hparams.output_dir )
if type_path == "test":
SCREAMING_SNAKE_CASE__ : Optional[Any] = od / """test_results.txt"""
SCREAMING_SNAKE_CASE__ : Tuple = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
SCREAMING_SNAKE_CASE__ : List[str] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
SCREAMING_SNAKE_CASE__ : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
generations_file.parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , """a+""" ) as writer:
for key in sorted(SCREAMING_SNAKE_CASE__ ):
if key in ["log", "progress_bar", "preds"]:
continue
SCREAMING_SNAKE_CASE__ : Any = metrics[key]
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val.item()
SCREAMING_SNAKE_CASE__ : Optional[int] = F'''{key}: {val:.6f}\n'''
writer.write(SCREAMING_SNAKE_CASE__ )
if not save_generations:
return
if "preds" in metrics:
SCREAMING_SNAKE_CASE__ : str = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(SCREAMING_SNAKE_CASE__ )
@rank_zero_only
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
try:
SCREAMING_SNAKE_CASE__ : Tuple = pl_module.model.model.num_parameters()
except AttributeError:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pl_module.model.num_parameters()
SCREAMING_SNAKE_CASE__ : int = count_trainable_parameters(SCREAMING_SNAKE_CASE__ )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , """test""" )
@rank_zero_only
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """num_attention_heads""" ) )
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=6_40 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__="silu" , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=None , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : int = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = last_hidden_size
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = conv_kernel_size
SCREAMING_SNAKE_CASE__ : int = output_stride
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : str = num_labels
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : str = scope
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ (self ) -> str:
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = MobileViTModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Dict = MobileViTForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = MobileViTForSemanticSegmentation(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Dict = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase : List[str] = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase : str = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : Any = False
__UpperCamelCase : int = False
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE__ : Dict = MobileViTConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
pass
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __magic_name__ (self ) -> int:
"""simple docstring"""
pass
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : str = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE__ : Tuple = 2
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Any = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = MobileViTModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE__ : int = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Tuple = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=SCREAMING_SNAKE_CASE__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE__ )
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
SCREAMING_SNAKE_CASE__ : int = Accelerator()
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
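        # Reset the global AcceleratorState so subsequent tests start from a clean slate.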
AcceleratorState._reset_state()
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : str = '''gptj'''
__UpperCamelCase : Optional[int] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__(self , SCREAMING_SNAKE_CASE__=5_04_00 , SCREAMING_SNAKE_CASE__=20_48 , SCREAMING_SNAKE_CASE__=40_96 , SCREAMING_SNAKE_CASE__=28 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=64 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="gelu_new" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=5_02_56 , SCREAMING_SNAKE_CASE__=5_02_56 , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = n_positions
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_embd
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_layer
SCREAMING_SNAKE_CASE__ : Optional[Any] = n_head
SCREAMING_SNAKE_CASE__ : Any = n_inner
SCREAMING_SNAKE_CASE__ : str = rotary_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = activation_function
SCREAMING_SNAKE_CASE__ : Tuple = resid_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = embd_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : Any = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = use_cache
SCREAMING_SNAKE_CASE__ : Dict = bos_token_id
SCREAMING_SNAKE_CASE__ : str = eos_token_id
super().__init__(
bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , tie_word_embeddings=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "default" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , ) -> str:
"""simple docstring"""
super().__init__(SCREAMING_SNAKE_CASE__ , task=SCREAMING_SNAKE_CASE__ , patching_specs=SCREAMING_SNAKE_CASE__ , use_past=SCREAMING_SNAKE_CASE__ )
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE__ , direction="""inputs""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self._config.n_head
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = super(SCREAMING_SNAKE_CASE__ , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ )
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE__ : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE__ : Optional[Any] = seqlen + 2
SCREAMING_SNAKE_CASE__ : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE__ : str = [
(torch.zeros(SCREAMING_SNAKE_CASE__ ), torch.zeros(SCREAMING_SNAKE_CASE__ )) for _ in range(self.num_layers )
]
SCREAMING_SNAKE_CASE__ : List[Any] = common_inputs["""attention_mask"""]
if self.use_past:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ )] , dim=1 )
return ordered_inputs
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 13
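# Hedged sketch of the past_key_values geometry built in generate_dummy_inputs above:
# each decoder layer carries a (key, value) pair of shape
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads).
import torch


def make_dummy_past(batch, num_heads, past_len, hidden_size, n_layer):
    shape = (batch, num_heads, past_len, hidden_size // num_heads)
    return [(torch.zeros(shape), torch.zeros(shape)) for _ in range(n_layer)]


# e.g. make_dummy_past(2, 16, 10, 4096, 28)[0][0].shape == torch.Size([2, 16, 10, 256])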
| 25 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_8bit = bnb_quantization_config.load_in_8bit
    load_in_4bit = bnb_quantization_config.load_in_4bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, module_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")

    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)

    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(
    model,
    bnb_quantization_config,
    modules_to_not_convert=None,
    current_key_name=None,
):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the ``nn.Linear`` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
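# Generic, dependency-light sketch of the recursive swap pattern implemented above
# (torch.nn only; the toy factory in the usage comment is an assumption for illustration):
import torch.nn as nn


def replace_linear(module, factory):
    """Recursively replace every nn.Linear leaf of `module` with factory(leaf)."""
    for name, child in module.named_children():
        if isinstance(child, nn.Linear):
            setattr(module, name, factory(child))
        else:
            replace_linear(child, factory)
    return module


# e.g. replace_linear(model, lambda lin: nn.Linear(lin.in_features, lin.out_features, bias=False))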
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if we have `bnb.nn.Linear4bit` layers inside the model
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        # get the module that the tensor lives in
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)

    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 25 | 1 |
"""simple docstring"""
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (a number whose only prime factors are 2, 3 and 5).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
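# Alternative heap-based sketch of the same computation (assumes only heapq and the
# factor set {2, 3, 5} used above; O(n log n) like the pointer version):
import heapq


def ugly_numbers_heap(n: int) -> int:
    heap, seen, value = [1], {1}, 1
    for _ in range(n):
        value = heapq.heappop(heap)
        for factor in (2, 3, 5):
            candidate = value * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return value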
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 |
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    """Find the longest common substring of two strings via dynamic programming.

    >>> longest_common_substring("abcdef", "xabded")
    'ab'
    >>> longest_common_substring("zxabcdezy", "yzabcdezx")
    'abcdez'
    """
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
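# Brute-force cross-check for the DP above (cubic in the worst case; tests only):
def longest_common_substring_bruteforce(text1: str, text2: str) -> str:
    best = ""
    for i in range(len(text1)):
        for j in range(i + 1, len(text1) + 1):
            if text1[i:j] in text2 and j - i > len(best):
                best = text1[i:j]
    return best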
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    >>> compare_string('0010', '0110')
    '0_10'

    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """
    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """
    >>> decimal_to_binary(3, [1.5])
    ['0.00.01.5']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """
    >>> is_for_table('__1', '011', 2)
    True

    >>> is_for_table('01_', '001', 1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """
    >>> selection([[1]], ['0.00.01.5'])
    ['0.00.01.5']
    """
    temp = []
    select = [0] * len(chart)
    # pick columns covered by exactly one prime implicant (essential ones)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """
    >>> prime_implicant_chart(['0.00.01.5'], ['0.00.01.5'])
    [[1]]
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 25 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
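# Worked example for normalize_box above (sample numbers chosen for illustration):
# a box spanning 10%-50% of a 640x480 page maps onto the resolution-independent
# 0-1000 grid as
#   normalize_box([64, 48, 320, 240], 640, 480) == [100, 100, 500, 500]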
def apply_tesseract(image, lang, tesseract_config=None):
    """Apply Tesseract OCR on a document image and return recognized words plus normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
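# Standalone numpy sketch of the RGB -> BGR channel flip applied above (assumes a
# channels-last array; transformers' flip_channel_order also handles other layouts):
import numpy as np


def flip_channels_last(image: np.ndarray) -> np.ndarray:
    """Reverse the channel order of a (..., channels) array, e.g. RGB to BGR."""
    return image[..., ::-1]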
| 25 | 1 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , a__ , )
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' , a__ , )
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
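# Hedged sketch of the entropy criterion behind the highway exits above: leave at
# the first exit whose prediction entropy drops below a confidence threshold.
import torch


def entropy_of_logits(logits: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of the softmax distribution over the last axis."""
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)


def should_exit_early(logits: torch.Tensor, threshold: float) -> bool:
    return bool(entropy_of_logits(logits).mean() < threshold)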
| 25 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    """Fast polynomial multiplication via a radix-2 fast Fourier transform."""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root of unity used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A or B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Butterfly passes
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B, then invert to recover A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
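    # Hedged demo of the class above: (1 + 2x + 3x^2) * (3 + 2x + x^2)
    print(FFT([1, 2, 3], [3, 2, 1]))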
| 25 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
UpperCAmelCase__ : List[str] = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
UpperCAmelCase__ : Dict = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""BertModelTest""": """BertModelTester"""}
SCREAMING_SNAKE_CASE__ : Any = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = get_model_to_test_mapping(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = get_model_to_test_mapping(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
SCREAMING_SNAKE_CASE__ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
SCREAMING_SNAKE_CASE__ : int = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
| 25 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
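# Hedged CLI sketch for the launcher above (script name and flags are placeholders):
#   python xla_spawn.py --num_cores 8 path/to/train_script.py --your_training_args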
if __name__ == "__main__":
main()
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node holds exactly one coin.

    >>> distribute_coins(TreeNode(3, TreeNode(0), TreeNode(0)))
    2
    >>> distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0)))
    3
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR returns 1 when both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
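# Equivalent bitwise formulation, shown for contrast (valid for inputs in {0, 1}):
# XNOR is the complement of XOR.
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return 1 - (input_1 ^ input_2)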
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[Any] = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : str = '''dpt'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=3_84 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[2, 5, 8, 11] , SCREAMING_SNAKE_CASE__="project" , SCREAMING_SNAKE_CASE__=[4, 2, 1, 0.5] , SCREAMING_SNAKE_CASE__=[96, 1_92, 3_84, 7_68] , SCREAMING_SNAKE_CASE__=2_56 , SCREAMING_SNAKE_CASE__=-1 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.4 , SCREAMING_SNAKE_CASE__=2_55 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=[1, 10_24, 24, 24] , SCREAMING_SNAKE_CASE__=[0, 1] , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[int] = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("""Initializing the config with a `BiT` backbone.""" )
SCREAMING_SNAKE_CASE__ : Any = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = BitConfig(**SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.info("""Initializing the config with a `BiT` backbone.""" )
SCREAMING_SNAKE_CASE__ : str = BitConfig(**SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = backbone_config
else:
raise ValueError(
F'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
SCREAMING_SNAKE_CASE__ : Dict = backbone_featmap_shape
SCREAMING_SNAKE_CASE__ : List[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError("""Readout type must be 'project' when using `DPT-hybrid` mode.""" )
else:
SCREAMING_SNAKE_CASE__ : str = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Any = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("""Readout_type must be one of ['ignore', 'add', 'project']""" )
SCREAMING_SNAKE_CASE__ : Dict = readout_type
SCREAMING_SNAKE_CASE__ : Optional[int] = reassemble_factors
SCREAMING_SNAKE_CASE__ : List[str] = neck_hidden_sizes
SCREAMING_SNAKE_CASE__ : Dict = fusion_hidden_size
SCREAMING_SNAKE_CASE__ : int = head_in_index
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : Tuple = use_auxiliary_head
SCREAMING_SNAKE_CASE__ : List[Any] = auxiliary_loss_weight
SCREAMING_SNAKE_CASE__ : Tuple = semantic_loss_ignore_index
SCREAMING_SNAKE_CASE__ : List[str] = semantic_classifier_dropout
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : str = self.__class__.model_type
return output
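# Generic sketch of the nested-config serialization pattern implemented above:
# deep-copy the instance dict and expand any value that knows how to dump itself.
import copy


def to_dict_with_nested(config) -> dict:
    output = copy.deepcopy(config.__dict__)
    for key, value in list(output.items()):
        if hasattr(value, "to_dict"):
            output[key] = value.to_dict()
    return output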
| 25 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the new default level.
    Otherwise, fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the current level for the library's root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level for the library's root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    """Disable propagation of the library log outputs. Note that log propagation is disabled by default."""
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Enable propagation of the library log outputs."""
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
| 25 | 1 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 25 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    model_type = '''yolos'''
    def __init__(self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[5_12, 8_64] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=1_00 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self ) -> float:
"""simple docstring"""
return 1E-4
@property
    def default_onnx_opset(self ) -> int:
"""simple docstring"""
return 12
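# Usage sketch: how a YOLOS-style config is typically consumed. This assumes
# the upstream `transformers` package and its `YolosConfig`; the helper name
# below is hypothetical.
def _yolos_sequence_length_demo():
    from transformers import YolosConfig

    config = YolosConfig(num_detection_tokens=50)
    # Encoder sequence length: image patches + [CLS] token + detection tokens.
    num_patches = (config.image_size[0] // config.patch_size) * (config.image_size[1] // config.patch_size)
    return num_patches + 1 + config.num_detection_tokens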
| 25 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    model_type = '''wavlm'''
    def __init__(self , vocab_size=32 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_28 , num_conv_pos_embedding_groups=16 , num_buckets=3_20 , max_bucket_distance=8_00 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=3_20 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_00 , codevector_dim=2_56 , proj_codevector_dim=2_56 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_56 , tdnn_dim=(5_12, 5_12, 5_12, 5_12, 15_00) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_12 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self ) -> int:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
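# Usage sketch: the property above multiplies the convolutional strides, i.e.
# the waveform-to-frame downsampling factor of the feature encoder. This
# assumes the upstream `transformers` package and its `WavLMConfig`; the
# helper name is hypothetical.
def _wavlm_downsampling_demo():
    from transformers import WavLMConfig

    config = WavLMConfig()
    # With the default strides (5, 2, 2, 2, 2, 2, 2) the factor is 320,
    # i.e. one output frame per 320 input samples (20 ms at 16 kHz).
    assert config.inputs_to_logits_ratio == 320
    return config.inputs_to_logits_ratio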
| 25 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim ,depths=depths ,num_heads=num_heads ,window_size=window_size ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
    config = UperNetConfig(
        backbone_config=backbone_config ,auxiliary_in_channels=auxiliary_in_channels ,num_labels=num_labels ,id2label=idalabel ,label2id=labelaid ,)
    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct ,old ,new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict ,backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel ,4 ,in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(out_channel ,in_channel )
    return x
def reverse_correct_unfold_reduction_order(x ):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel ,in_channel // 4 ,4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(out_channel ,in_channel )
    return x
def correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(4 ,in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 ,4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(in_channel )
    return x
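# Sanity-check sketch: each reverse_* helper is meant to undo its correct_*
# counterpart, which reorders the channels produced by Swin's patch-merging
# unfold. The helper name is hypothetical; only torch (imported above) is
# assumed. The permutation [0, 2, 1, 3] is its own inverse, so a roundtrip
# must reproduce the input exactly.
def _unfold_roundtrip_check():
    x = torch.randn(8, 16)
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)
    v = torch.randn(16)
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(v)), v)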
def convert_upernet_checkpoint(model_name ,pytorch_dump_folder_path ,push_to_hub ):
    model_name_to_url = {
        """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
        """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
        """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
        """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="""cpu""" ,file_name=model_name )[
        """state_dict"""
    ]
    for name, param in state_dict.items():
        print(name ,param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" ,"""batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url ,stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image ,return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print("""First values of logits:""" ,logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] ,expected_slice ,atol=1E-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(f'''openmmlab/{model_name}''' )
        processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name ):
    config = SwinvaConfig()
    name_split = swinva_name.split("""_""" )
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:] )
    else:
        img_size = int(name_split[3] )
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:] )
    else:
        window_size = int(name_split[2][6:] )
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21_841
        repo_id = """huggingface/label-files"""
        filename = """imagenet-22k-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1_000
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name ):
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" ,"""embeddings.patch_embeddings.projection""" )
    if "patch_embed.norm" in name:
        name = name.replace("""patch_embed.norm""" ,"""embeddings.norm""" )
    if "layers" in name:
        name = """encoder.""" + name
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" ,"""attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" ,"""attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" ,"""layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" ,"""layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" ,"""intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" ,"""output.dense""" )
    if "q_bias" in name:
        name = name.replace("""q_bias""" ,"""query.bias""" )
    if "k_bias" in name:
        name = name.replace("""k_bias""" ,"""key.bias""" )
    if "v_bias" in name:
        name = name.replace("""v_bias""" ,"""value.bias""" )
    if "cpb_mlp" in name:
        name = name.replace("""cpb_mlp""" ,"""continuous_position_bias_mlp""" )
    if name == "norm.weight":
        name = """layernorm.weight"""
    if name == "norm.bias":
        name = """layernorm.bias"""
    if "head" in name:
        name = name.replace("""head""" ,"""classifier""" )
    else:
        name = """swinv2.""" + name
    return name
def convert_state_dict(orig_state_dict ,model ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[1] )
            block_num = int(key_split[3] )
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'''] = val[:dim, :]
                orig_state_dict[f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'''] = val[:dim]
                orig_state_dict[f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'''] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f'''swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'''] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
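# Illustration: `convert_state_dict` slices timm's fused qkv projection of
# shape (3 * dim, dim) into query/key/value blocks, in that order. A tiny
# self-contained check (the helper name is hypothetical):
def _qkv_split_demo():
    dim = 4
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)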
def convert_swinva_checkpoint(swinva_name ,pytorch_dump_folder_path ):
    timm_model = timm.create_model(swinva_name ,pretrained=True )
    timm_model.eval()
    config = get_swinva_config(swinva_name )
    model = SwinvaForImageClassification(config )
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict() ,model )
    model.load_state_dict(new_state_dict )
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""" ,"""-""" ) ) )
    image = Image.open(requests.get(url ,stream=True ).raw )
    inputs = image_processor(images=image ,return_tensors="""pt""" )
    timm_outs = timm_model(inputs["""pixel_values"""] )
    hf_outs = model(**inputs ).logits
    assert torch.allclose(timm_outs ,hf_outs ,atol=1E-3 )
    print(f'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path ,swinva_name ) ,organization="""nandwalritik""" ,commit_message="""Add model""" ,)
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase__ : int = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 25 |
"""simple docstring"""
import math
import unittest
def is_prime(number ):
    assert isinstance(number ,int ) and (
        number >= 0
    ), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 ,int(math.sqrt(number ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
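# Worked example: the 6k +/- 1 walk above only tests 5, 7, 11, 13, ... up to
# sqrt(n), so the table below is cheap to compute and easy to verify by hand.
PRIMES_BELOW_50 = [n for n in range(2, 50) if is_prime(n)]
# -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]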
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_primes(self ) -> None:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
    def test_not_primes(self ) -> None:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    def setUp(self ) -> None:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : Dict = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = os.path.join(SCREAMING_SNAKE_CASE__ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_dpr_tokenizer(self ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer(self ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown(self ) -> None:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(self.tmpdirname , """rag_tokenizer""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
SCREAMING_SNAKE_CASE__ : List[Any] = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , config=SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer(self ) -> None:
"""simple docstring"""
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer(self ) -> None:
"""simple docstring"""
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
| 25 |
"""simple docstring"""
def ugly_numbers(n ):
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 ,n ):
        next_num = min(next_a ,next_b ,next_c )
        ugly_nums.append(next_num )
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
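# Worked example: the three pointers merge the streams 2*u, 3*u and 5*u over
# the ugly numbers u themselves, so the sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... (7 and 11 are skipped).
def _ugly_numbers_examples():
    assert [ugly_numbers(n) for n in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]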
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
class Node:
"""simple docstring"""
    def __init__(self , data ) -> None:
        """simple docstring"""
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree ): # In Order traversal of the tree
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def depth_of_tree(tree ):
return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree(tree ):
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
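# Quick illustration: a root with two leaf children is full, while hanging a
# single grandchild off one leaf breaks the property, because that leaf then
# has exactly one child. The helper name is hypothetical.
def _full_tree_demo() -> None:
    root = Node(1)
    root.left, root.right = Node(2), Node(3)
    assert is_full_binary_tree(root)
    root.left.left = Node(4)
    assert not is_full_binary_tree(root)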
def main(): # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.left.right.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print("""Tree is: """ )
    display(tree )
if __name__ == "__main__":
main()
| 25 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    model_type = '''audio-spectrogram-transformer'''
    def __init__(self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=10_24 , num_mel_bins=1_28 , **kwargs , ) -> Tuple:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
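# Usage sketch: the strides above define the spectrogram patch grid the same
# way a strided convolution would, via (input - patch_size) // stride + 1
# along each axis. The helper below is hypothetical and just restates that
# arithmetic with the defaults (128 mel bins, 1024 frames, 16x16 patches).
def _ast_patch_grid(num_mel_bins=128, max_length=1024, patch_size=16, frequency_stride=10, time_stride=10):
    freq_dim = (num_mel_bins - patch_size) // frequency_stride + 1
    time_dim = (max_length - patch_size) // time_stride + 1
    return freq_dim, time_dim  # (12, 101) with the defaults, i.e. 1212 patches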
| 25 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """simple docstring"""
    def forward(self , input_ids , token_type_ids , attention_mask ) -> Optional[int]:
        """simple docstring"""
        return None
class FuncNonContiguousArgs:
    """simple docstring"""
    def forward(self , input_ids , some_other_args , token_type_ids , attention_mask ) -> Optional[int]:
        """simple docstring"""
        return None
class OnnxExportTestCase(unittest.TestCase ):
    """simple docstring"""
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
    ]
@require_tf
@slow
    def test_export_tensorflow(self ) -> None:
"""simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """tf""" , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_pytorch(self ) -> None:
"""simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model , """pt""" , 12 , **model_kwargs )
@require_torch
@slow
    def test_export_custom_bert_model(self ) -> None:
"""simple docstring"""
from transformers import BertModel
        vocab = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(vocab ) )
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab ) ) )
            model.save_pretrained(bert_save_dir )
            self._test_export(bert_save_dir , """pt""" , 12 , tokenizer )
@require_tf
@slow
    def test_quantize_tf(self ) -> None:
"""simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """tf""" , 12 , **model_kwargs )
            quantized_path = quantize(Path(path ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
    def test_quantize_pytorch(self ) -> None:
"""simple docstring"""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model , """pt""" , 12 , **model_kwargs )
            quantized_path = quantize(path )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    def _test_export(self , model , framework , opset , tokenizer=None , **model_kwargs ) -> List[Any]:
        """simple docstring"""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir ).joinpath("""model.onnx""" )
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework , model , path , opset , tokenizer , **model_kwargs )
                return path
        except Exception as e:
            self.fail(e )
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self ) -> None:
"""simple docstring"""
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model , tokenizer , """pt""" )
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self ) -> None:
"""simple docstring"""
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(model , tokenizer , """tf""" )
    def _test_infer_dynamic_axis(self , model , tokenizer , framework ) -> None:
        """simple docstring"""
        nlp = FeatureExtractionPipeline(model , tokenizer )
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp , framework )
        # Assert all variables are present
        self.assertEqual(len(shapes ) , len(variable_names ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , input_vars )
        self.assertSequenceEqual(variable_names[3:] , output_vars )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
    def test_ensure_valid_input(self ) -> None:
"""simple docstring"""
        valid_input_names = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names , inputs_args = ensure_valid_input(FuncContiguousArgs() , tokens , valid_input_names )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names ) , set(valid_input_names ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names , inputs_args = ensure_valid_input(FuncNonContiguousArgs() , tokens , valid_input_names )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args ) , 1 )
        self.assertEqual(len(ordered_input_names ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
    def test_generate_identified_filename(self ) -> None:
"""simple docstring"""
        generated = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 25 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict(state_dict ,codebook_state_dict ):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
        key = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
        key = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
        key = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
        key = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
        key = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
        key = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
        key = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
        key = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
        key = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
        key = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
        key = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
        key = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
        key = key.replace("""text_projection""" ,"""flava.text_projection""" )
        key = key.replace("""image_projection""" ,"""flava.image_projection""" )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f'''image_codebook.{key}'''] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path ,codebook_path ,pytorch_dump_folder_path ,config_path=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path ,None ,save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path ,map_location="""cpu""" )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path ,map_location="""cpu""" )
    hf_state_dict = upgrade_state_dict(state_dict ,codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_count = count_parameters(hf_model.state_dict() )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count ,state_dict_count ,atol=1E-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 1 |
"""simple docstring"""
from math import isqrt
def is_prime(number ):
    return all(number % divisor != 0 for divisor in range(2 ,isqrt(number ) + 1 ) )
def solution(max_prime = 10**6 ):
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
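# Worked example: the candidates are the gaps between consecutive cubes,
# (n + 1)**3 - n**3 = 3*n*n + 3*n + 1, i.e. 7, 19, 37, 61, 91, ...
# Of those below 100, 7, 19, 37 and 61 are prime while 91 = 7 * 13 is not.
def _cuban_prime_examples():
    assert solution(100) == 4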
if __name__ == "__main__":
print(f"""{solution() = }""")
| 25 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path ,pytorch_dump_folder_path ,classification_head ):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path )
    roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
    if classification_head:
        config.num_labels = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our RoBERTa config:""" ,config )
    model = XLMRobertaXLForSequenceClassification(config ) if classification_head else XLMRobertaXLForMaskedLM(config )
    model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fc1.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fc1.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fc2.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fc2.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
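# Example invocation (script filename and the fairseq dump directory are hypothetical):
#
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path ./xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-hf \
#       --classification_head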
| 25 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : List[str] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : str = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
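# The _LazyModule indirection above defers the heavy framework imports until an exported
# attribute is first touched. A minimal sketch of the same idea (not the real implementation):
import importlib

class _LazyStub:
    def __init__(self, pkg_name, import_structure):
        # Map each exported attribute to the submodule that defines it.
        self._pkg_name = pkg_name
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import the owning submodule only now, on first access.
        module = importlib.import_module(f".{self._attr_to_module[attr]}", self._pkg_name)
        return getattr(module, attr)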
| 25 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
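# The eight integer arrays above are hand-picked inference timestep schedules of lengths
# 27, 27, 50, 100, 185, 27, 40 and 100; in the DeepFloyd IF utilities they carry names like
# fast27_timesteps / smart50_timesteps / super100_timesteps (an assumption here, since the
# identifiers were rewritten). A sketch of how such a schedule is consumed — pipelines that
# support custom schedules accept the list directly:
#
#   image = pipe(prompt, timesteps=fast27_timesteps).images[0]  # fewer, unevenly spaced steps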
| 25 | 1 |
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowercase_ ( _snake_case = 3 ):
if not isinstance(_snake_case ,int ):
raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(_snake_case ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate (>10).""" )
SCREAMING_SNAKE_CASE__ : str = QuantumRegister(_snake_case ,"""qr""" )
SCREAMING_SNAKE_CASE__ : Dict = ClassicalRegister(_snake_case ,"""cr""" )
SCREAMING_SNAKE_CASE__ : Dict = QuantumCircuit(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : int = number_of_qubits
for i in range(_snake_case ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_snake_case ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) ,_snake_case ,_snake_case )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_snake_case ,number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_snake_case ,_snake_case )
# simulate with 10000 shots
SCREAMING_SNAKE_CASE__ : Optional[int] = Aer.get_backend("""qasm_simulator""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = execute(_snake_case ,_snake_case ,shots=10_000 )
return job.result().get_counts(_snake_case )
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
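# Sanity expectation (a note, not executed): the register starts in |000>, and the QFT of
# the all-zero state is the uniform superposition, so the 10_000-shot histogram should be
# spread roughly evenly over all 2**3 = 8 bitstrings (~1_250 counts each), e.g.:
#
#   counts = quantum_fourier_transform(3)
#   assert all(abs(v - 10_000 / 8) < 300 for v in counts.values())  # rough statistical bound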
| 25 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = SavedModel()
SCREAMING_SNAKE_CASE__ : Dict = []
with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_snake_case )] )
with open(_snake_case ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_snake_case )
if strict and len(_snake_case ) > 0:
raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + """\n""".join(incompatible_ops ) )
elif len(_snake_case ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_snake_case ,sep="""\n""" )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
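# Example invocation, assuming the script is saved as utils/check_tf_ops.py and run from the
# repo root (the saved-model path is a placeholder):
#
#   python utils/check_tf_ops.py --saved_model_path ./saved_model/model.pb --opset 12 --strict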
| 25 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
SCREAMING_SNAKE_CASE__ : str = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
SCREAMING_SNAKE_CASE__ : int = """The dog is cute and lives in the garden house"""
SCREAMING_SNAKE_CASE__ : Any = jnp.array([tokenizer.encode(SCREAMING_SNAKE_CASE__ )] )
SCREAMING_SNAKE_CASE__ : Dict = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )["""last_hidden_state"""]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) )
| 25 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ : List[Any] = logging.getLogger()
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case )
Path(_snake_case ).open("""w""" ).writelines(_snake_case )
UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random'
UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart'
UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : Any = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] )
SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : List[Any] = F'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
SCREAMING_SNAKE_CASE__ : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(SCREAMING_SNAKE_CASE__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
| 25 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_float16(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
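# The helper above zeroes the CUDA allocator statistics so that each subsequent
# torch.cuda.max_memory_allocated() read reflects only the work done since the reset.
# Typical pattern, as used throughout the tests above (a sketch):
#
#   _start_torch_memory_measurement()
#   ...run one pipeline...
#   peak_bytes = torch.cuda.max_memory_allocated()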
| 25 |
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 25 | 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
UpperCAmelCase__ : int = 1_6
UpperCAmelCase__ : int = 3_2
def lowercase_ ( _snake_case ,_snake_case = 16 ):
SCREAMING_SNAKE_CASE__ : Dict = AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ : Tuple = load_dataset("""glue""" ,"""mrpc""" )
def tokenize_function(_snake_case ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(examples["""sentence1"""] ,examples["""sentence2"""] ,truncation=_snake_case ,max_length=_snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : List[str] = datasets.map(
_snake_case ,batched=_snake_case ,remove_columns=["""idx""", """sentence1""", """sentence2"""] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : Any = tokenized_datasets.rename_column("""label""" ,"""labels""" )
def collate_fn(_snake_case ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Any = 8
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
return tokenizer.pad(
_snake_case ,padding="""longest""" ,max_length=_snake_case ,pad_to_multiple_of=_snake_case ,return_tensors="""pt""" ,)
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DataLoader(
tokenized_datasets["""train"""] ,shuffle=_snake_case ,collate_fn=_snake_case ,batch_size=_snake_case )
SCREAMING_SNAKE_CASE__ : int = DataLoader(
tokenized_datasets["""validation"""] ,shuffle=_snake_case ,collate_fn=_snake_case ,batch_size=_snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ : Union[str, Any] = mocked_dataloaders # noqa: F811
def lowercase_ ( _snake_case ,_snake_case ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" ,_snake_case ) == "1":
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : int = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : List[str] = config["""lr"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE__ : List[str] = int(config["""seed"""] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = evaluate.load("""glue""" ,"""mrpc""" )
# If the batch size is too big we use gradient accumulation
SCREAMING_SNAKE_CASE__ : Dict = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
SCREAMING_SNAKE_CASE__ : Tuple = batch_size // MAX_GPU_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : Any = MAX_GPU_BATCH_SIZE
set_seed(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = get_dataloaders(_snake_case ,_snake_case )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" ,return_dict=_snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Dict = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Dict = AdamW(params=model.parameters() ,lr=_snake_case )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Dict = get_linear_schedule_with_warmup(
optimizer=_snake_case ,num_warmup_steps=100 ,num_training_steps=(len(_snake_case ) * num_epochs) // gradient_accumulation_steps ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.prepare(
_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
# Now we train the model
for epoch in range(_snake_case ):
model.train()
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
SCREAMING_SNAKE_CASE__ : List[str] = model(**_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.loss
SCREAMING_SNAKE_CASE__ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for step, batch in enumerate(_snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = accelerator.gather((predictions, batch["""labels"""]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_snake_case ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
SCREAMING_SNAKE_CASE__ : Any = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_snake_case ,references=_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" ,type=_snake_case ,default=_snake_case ,choices=["""no""", """fp16""", """bf16""", """fp8"""] ,help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" ,)
parser.add_argument("""--cpu""" ,action="""store_true""" ,help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
SCREAMING_SNAKE_CASE__ : List[str] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_snake_case ,_snake_case )
if __name__ == "__main__":
main()
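# As the "New Code" comments above note, Accelerator.gather_for_metrics collapses the manual
# last-batch truncation into a single call; the evaluation gather then reduces to (a sketch):
#
#   predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
#   metric.add_batch(predictions=predictions, references=references)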
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,int ):
raise TypeError("""Input value must be an 'int' type""" )
elif _snake_case < 0:
raise ValueError("""Input value must be a positive integer""" )
return bin(_snake_case ).count("""1""" )
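# Quick cross-check against an equivalent bit trick (Brian Kernighan's method), added here
# as an illustrative sketch:
def _popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # clear the lowest set bit
        count += 1
    return count

assert _popcount_kernighan(25) == bin(25).count("1") == 3  # 25 == 0b11001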
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase_ ( _snake_case ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_snake_case ):
return ext
raise Exception(
f'''Unable to determine file format from file extension {path}. '''
f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = pipeline(
task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,)
SCREAMING_SNAKE_CASE__ : Tuple = try_infer_format_from_ext(args.input ) if args.format == """infer""" else args.format
SCREAMING_SNAKE_CASE__ : Tuple = PipelineDataFormat.from_str(
format=_snake_case ,output_path=args.output ,input_path=args.input ,column=args.column if args.column else nlp.default_input_names ,overwrite=args.overwrite ,)
return RunCommand(_snake_case ,_snake_case )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = nlp
SCREAMING_SNAKE_CASE__ : Tuple = reader
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.add_parser("""run""" , help="""Run a pipeline through the CLI""" )
run_parser.add_argument("""--task""" , choices=get_supported_tasks() , help="""Task to run""" )
run_parser.add_argument("""--input""" , type=SCREAMING_SNAKE_CASE__ , help="""Path to the file to use for inference""" )
run_parser.add_argument("""--output""" , type=SCREAMING_SNAKE_CASE__ , help="""Path to the file that will be used post to write results.""" )
run_parser.add_argument("""--model""" , type=SCREAMING_SNAKE_CASE__ , help="""Name or path to the model to instantiate.""" )
run_parser.add_argument("""--config""" , type=SCREAMING_SNAKE_CASE__ , help="""Name or path to the model's config to instantiate.""" )
run_parser.add_argument(
"""--tokenizer""" , type=SCREAMING_SNAKE_CASE__ , help="""Name of the tokenizer to use. (default: same as the model name)""" )
run_parser.add_argument(
"""--column""" , type=SCREAMING_SNAKE_CASE__ , help="""Name of the column to use as input. (For multi columns input as QA use column1,columns2)""" , )
run_parser.add_argument(
"""--format""" , type=SCREAMING_SNAKE_CASE__ , default="""infer""" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="""Input format to read from""" , )
run_parser.add_argument(
"""--device""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , )
run_parser.add_argument("""--overwrite""" , action="""store_true""" , help="""Allow overwriting the output file.""" )
run_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self._nlp, []
for entry in self._reader:
SCREAMING_SNAKE_CASE__ : List[Any] = nlp(**SCREAMING_SNAKE_CASE__ ) if self._reader.is_multi_columns else nlp(SCREAMING_SNAKE_CASE__ )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
outputs.append(SCREAMING_SNAKE_CASE__ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
SCREAMING_SNAKE_CASE__ : Tuple = self._reader.save_binary(SCREAMING_SNAKE_CASE__ )
logger.warning(F'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
else:
self._reader.save(SCREAMING_SNAKE_CASE__ )
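# Example invocation of the subcommand registered above (task and file names illustrative):
#
#   transformers-cli run --task sentiment-analysis --input data.csv --column text \
#       --output predictions.csv --format csv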
| 25 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
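# Minimal usage sketch for the pipeline class above; the checkpoint name is one common
# choice, not mandated by this file:
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor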
| 25 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : Dict = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_float16(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        """simple docstring"""
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImgaImgPipeline(**pipe_1.components)
        pipe_2 = IFImgaImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_imgaimg(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_imgaimg(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        """simple docstring"""
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
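# A minimal sketch (not part of the original test file) of how the helper above
# pairs with torch's peak-memory API; the pipeline call is illustrative:
#
#     _start_torch_memory_measurement()
#     output = pipe_1(prompt_embeds=prompt_embeds, num_inference_steps=2, output_type="np")
#     mem_bytes = torch.cuda.max_memory_allocated()
#     assert mem_bytes < 13 * 10**9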
| 25 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
),
},
'merges_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
),
},
'tokenizer_file': {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
'roberta-base-openai-detector': (
'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
),
'roberta-large-openai-detector': (
'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'roberta-base': 5_1_2,
'roberta-large': 5_1_2,
'roberta-large-mnli': 5_1_2,
'distilroberta-base': 5_1_2,
'roberta-base-openai-detector': 5_1_2,
'roberta-large-openai-detector': 5_1_2,
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """simple docstring"""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        """simple docstring"""
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """simple docstring"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
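# A hedged usage sketch (the upstream class is transformers' RobertaTokenizerFast;
# the checkpoint name is illustrative): RoBERTa formats a pair of sequences as
# `<s> A </s></s> B </s>`, and all token type ids are zero because RoBERTa does
# not use them.
#
#     tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#     encoding = tokenizer("Hello", "world")
#     # encoding["input_ids"] == [0, ...A..., 2, 2, ...B..., 2]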
| 25 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling(self):
        """simple docstring"""
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 25 | 1 |
"""simple docstring"""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence
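# Worked example (illustrative): dutch_national_flag_sort([2, 0, 1, 0, 2])
# partitions the list in a single pass into [0, 0, 1, 2, 2] using the three
# pointers `low`, `mid` and `high` maintained above.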
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 25 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    module_name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, module_name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
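# A minimal usage sketch, assuming a model instantiated under
# `init_empty_weights` and a local checkpoint folder (the path and config
# values are illustrative):
#
#     from accelerate.utils import BnbQuantizationConfig
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized_model = load_and_quantize_model(
#         empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#     )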
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
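# Example of a custom device_map (module names illustrative): quantized modules
# must stay on GPU; only modules listed in `skip_modules` /
# `keep_in_fp32_modules` may be dispatched to "cpu" or "disk".
#
#     device_map = {"transformer.wte": 0, "transformer.h": 0, "lm_head": "cpu"}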
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
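# Note: for a typical causal LM the list returned above usually contains just
# the output head (e.g. ["lm_head"]) plus any modules tied to it; these are
# kept in their original dtype for numerical stability.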
def has_4bit_bnb_layers(model):
    # Check if the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 25 | 1 |
"""simple docstring"""
def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1_901
    sundays = 0
    while year < 2_001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2_001 and day == 1:
            sundays += 1
    return sundays
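# A quick cross-check of the calendar walk above, using the standard library
# (illustrative, not part of the original solution):
#
#     from datetime import date
#     sum(date(y, m, 1).weekday() == 6 for y in range(1_901, 2_001) for m in range(1, 13))
#     # -> 171, matching solution()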
if __name__ == "__main__":
print(solution())
| 25 |
"""simple docstring"""
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
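# Example: longest_common_substring("abcdef", "xbcdy") returns "bcd" -- the dp
# table stores, at dp[i][j], the length of the common suffix of text1[:i] and
# text2[:j].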
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 5_1_2,
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        """simple docstring"""
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)
        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        """simple docstring"""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)
    def get_vocab(self):
        """simple docstring"""
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = [self.artists_encoder.get(SCREAMING_SNAKE_CASE__ , 0 ) for artist in list_artists]
for genres in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : Any = [self.genres_encoder.get(SCREAMING_SNAKE_CASE__ , 0 ) for genre in list_genres[genres]]
SCREAMING_SNAKE_CASE__ : Any = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
SCREAMING_SNAKE_CASE__ : int = [[self.lyrics_encoder.get(SCREAMING_SNAKE_CASE__ , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
return list(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.prepare_for_tokenization(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._tokenize(SCREAMING_SNAKE_CASE__ )
return artist, genre, lyrics
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> Tuple[str, str, str, Dict[str, Any]]:
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
SCREAMING_SNAKE_CASE__ : List[str] = artists[idx].lower()
SCREAMING_SNAKE_CASE__ : int = [genres[idx].lower()]
else:
SCREAMING_SNAKE_CASE__ : Tuple = self._normalize(artists[idx] ) + """.v2"""
SCREAMING_SNAKE_CASE__ : int = [
self._normalize(SCREAMING_SNAKE_CASE__ ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
SCREAMING_SNAKE_CASE__ : Tuple = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
SCREAMING_SNAKE_CASE__ : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
SCREAMING_SNAKE_CASE__ : List[str] = {vocab[index]: index + 1 for index in range(len(SCREAMING_SNAKE_CASE__ ) )}
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : Dict = len(SCREAMING_SNAKE_CASE__ ) + 1
SCREAMING_SNAKE_CASE__ : List[Any] = self.vocab
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in self.vocab.items()}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """"""
else:
SCREAMING_SNAKE_CASE__ : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
SCREAMING_SNAKE_CASE__ : Tuple = self._run_strip_accents(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = lyrics.replace("""\\""" , """\n""" )
SCREAMING_SNAKE_CASE__ : Tuple = self.out_of_vocab.sub("""""" , SCREAMING_SNAKE_CASE__ ), [], []
return artists, genres, lyrics
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = unicodedata.normalize("""NFD""" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for char in text:
SCREAMING_SNAKE_CASE__ : Optional[int] = unicodedata.category(SCREAMING_SNAKE_CASE__ )
if cat == "Mn":
continue
output.append(SCREAMING_SNAKE_CASE__ )
return "".join(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = (
[chr(SCREAMING_SNAKE_CASE__ ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(SCREAMING_SNAKE_CASE__ ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(SCREAMING_SNAKE_CASE__ ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
SCREAMING_SNAKE_CASE__ : Dict = frozenset(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r"""_+""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = """""".join([c if c in accepted else """_""" for c in text.lower()] )
SCREAMING_SNAKE_CASE__ : str = pattern.sub("""_""" , SCREAMING_SNAKE_CASE__ ).strip("""_""" )
return text
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return " ".join(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[str]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = TensorType(SCREAMING_SNAKE_CASE__ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : str = tf.constant
SCREAMING_SNAKE_CASE__ : Tuple = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor
SCREAMING_SNAKE_CASE__ : Any = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
SCREAMING_SNAKE_CASE__ : int = jnp.array
SCREAMING_SNAKE_CASE__ : Dict = _is_jax
else:
SCREAMING_SNAKE_CASE__ : Any = np.asarray
SCREAMING_SNAKE_CASE__ : Dict = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
SCREAMING_SNAKE_CASE__ : Any = [inputs]
if not is_tensor(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = as_tensor(SCREAMING_SNAKE_CASE__ )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="" , SCREAMING_SNAKE_CASE__="pt" ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = [0, 0, 0]
SCREAMING_SNAKE_CASE__ : Optional[int] = [artist] * len(self.version )
SCREAMING_SNAKE_CASE__ : int = [genres] * len(self.version )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenize(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self._convert_token_to_id(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = [-INFINITY] * len(full_tokens[-1] )
SCREAMING_SNAKE_CASE__ : str = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=SCREAMING_SNAKE_CASE__ )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=SCREAMING_SNAKE_CASE__ ) )
return (artists_file, genres_file, lyrics_file)
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.artists_decoder.get(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = [self.genres_decoder.get(SCREAMING_SNAKE_CASE__ ) for genre in genres_index]
SCREAMING_SNAKE_CASE__ : List[Any] = [self.lyrics_decoder.get(SCREAMING_SNAKE_CASE__ ) for character in lyric_index]
return artist, genres, lyrics
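# A hedged usage sketch for the tokenizer above (the upstream class is
# transformers' JukeboxTokenizer; the checkpoint name is illustrative):
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     encoding = tokenizer("Alan Jackson", "Country Rock", lyrics="old town road")
#     # encoding["input_ids"] holds one tensor per model level (see `self.version`)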
| 25 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
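# Worked example: the box [10, 20, 110, 220] on a 1_000 x 1_000 image stays
# [10, 20, 110, 220] on the 0-1000 scale; on a 500 x 500 image it becomes
# [20, 40, 220, 440].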
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
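# A hedged usage sketch for the image processor above (requires Pillow, plus
# pytesseract when apply_ocr=True; the file name is illustrative):
#
#     from PIL import Image
#     image = Image.open("document.png").convert("RGB")
#     processor = lowerCAmelCase_(apply_ocr=True)
#     encoding = processor(image, return_tensors="pt")
#     # encoding holds "pixel_values" plus the OCR'd "words" and "boxes"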
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
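# Sanity check (illustrative): a blinker oscillates with period 2, so applying
# new_generation twice returns the original pattern:
#
#     assert new_generation(new_generation(BLINKER)) == BLINKER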
def generate_images(cells: list[list[int]] ,frames: int ) -> list[Image.Image]:
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" ,(len(cells[0] ), len(cells )) )
        pixels = img.load()
# Save cells to image
        for x in range(len(cells ) ):
for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
# Save image
        images.append(img )
        cells = new_generation(cells )
return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 1_6)
images[0].save('out.gif', save_all=True, append_images=images[1:])
| 25 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
"""simple docstring"""
    def __init__(self , poly_a=None , poly_b=None ) -> Dict:
        """simple docstring"""
        self.polyA = list(poly_a or [0] )[:]
        self.polyB = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        self.product = self.__multiply()
    def __dft(self , which ) -> Union[str, Any]:
        """simple docstring"""
        dft = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
        # Corner case
        if len(dft ) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol )]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(next_ncol ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
return dft[0]
    def __multiply(self ) -> int:
        """simple docstring"""
        dft_a = self.__dft("""A""" )
        dft_b = self.__dft("""B""" )
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol )]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self ) -> List[str]:
        """simple docstring"""
        a = """A = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A] ) )
        b = """B = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B] ) )
        c = """A*B = """ + """ + """.join(
            F'''{coef}*x^{i}''' for i, coef in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
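    # Hedged usage sketch (added for illustration): multiplying (1 + 2x) by
    # (3 + 4x) should give 3 + 10x + 8x^2; coefficients print as rounded
    # complex numbers, e.g. (3+0j)*x^0.
    product_demo = FFT(poly_a=[1, 2], poly_b=[3, 4])
    print(product_demo)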
| 25 | 1 |
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
"""simple docstring"""
    def __init__(self , config_file_or_dict ) -> Tuple:
        """simple docstring"""
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , """r""" , encoding="""utf-8""" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("""utf-8""" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    F'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload(self ) -> str:
        """simple docstring"""
        self._stage = self.get_value("""zero_optimization.stage""" , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["""cpu""", """nvme"""] )
            offload_devices = set(
                [
                    self.get_value("""zero_optimization.offload_optimizer.device""" ),
                    self.get_value("""zero_optimization.offload_param.device""" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node(self , ds_key_long ) -> Any:
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value(self , ds_key_long , default=None ) -> Dict:
        """simple docstring"""
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
    def del_config_sub_tree(self , ds_key_long , must_exist=False ) -> Union[str, Any]:
        """simple docstring"""
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(F'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true(self , ds_key_long ) -> Any:
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false(self , ds_key_long ) -> int:
        """simple docstring"""
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2(self ) -> List[Any]:
        """simple docstring"""
        return self._stage == 2
    def is_zero3(self ) -> List[Any]:
        """simple docstring"""
        return self._stage == 3
    def is_offload(self ) -> Optional[Any]:
        """simple docstring"""
        return self._offload
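# Worked example of the dotted-key lookup above (added; the config values are
# made up): with self.config = {"zero_optimization": {"stage": 3}},
# find_config_node("zero_optimization.stage") returns ({"stage": 3}, "stage"),
# and get_value("zero_optimization.stage") returns 3.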
class DeepSpeedEngineWrapper:
    """simple docstring"""
    def __init__(self , engine ) -> Dict:
        """simple docstring"""
        self.engine = engine
    def backward(self , loss , **kwargs ) -> Any:
        """simple docstring"""
        self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
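# Hedged illustration (added; not in the original file): with the wrapper
# above, a plain training loop stays unchanged under DeepSpeed, e.g.
#
#     engine_wrapper = DeepSpeedEngineWrapper(engine)
#     for batch in dataloader:
#         loss = engine(**batch).loss
#         engine_wrapper.backward(loss)  # also runs engine.step() internally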
class DeepSpeedOptimizerWrapper(AcceleratedOptimizer ):
    """simple docstring"""
    def __init__(self , optimizer ) -> Union[str, Any]:
        """simple docstring"""
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , """overflow""" )
    def zero_grad(self , set_to_none=None ) -> Optional[Any]:
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step(self ) -> Optional[Any]:
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def step_was_skipped(self ) -> Optional[int]:
        """simple docstring"""
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper(AcceleratedScheduler ):
    """simple docstring"""
    def __init__(self , scheduler , optimizers ) -> List[str]:
        """simple docstring"""
        super().__init__(scheduler , optimizers )
    def step(self ) -> Optional[int]:
        """simple docstring"""
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    """simple docstring"""
    def __init__(self , params , lr=0.001 , weight_decay=0 , **kwargs ) -> Dict:
        """simple docstring"""
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class DummyScheduler:
    """simple docstring"""
    def __init__(self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ) -> Dict:
        """simple docstring"""
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 25 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
    parser.add_argument(
        """training_script""" ,type=str ,help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ) ,)
    # rest from the training program
    parser.add_argument("""training_script_args""" ,nargs=REMAINDER )
return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
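# Example invocation (added; the training script name and its flags are made
# up for illustration):
#
#     python xla_spawn.py --num_cores 8 train_glue.py --batch_size 32
#
# The launcher imports train_glue.py as a module, rewrites sys.argv to append
# --tpu_num_cores, and spawns one process per TPU core via xmp.spawn.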
| 25 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer  # noqa: F401 (re-exported below)
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ) -> Dict:
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB )
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts(self , tokenizer ) -> Any:
        """simple docstring"""
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text
    def get_clean_sequence(self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> List[Any]:
        """simple docstring"""
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_convert_token_and_id(self ) -> str:
        """simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ) -> str:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-4] , """œ""" )
        self.assertEqual(vocab_keys[-2] , """<mask>""" )
        self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
        self.assertEqual(len(vocab_keys ) , 81 )
    def test_vocab_size(self ) -> List[Any]:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def test_add_tokens_tokenizer(self ) -> Union[str, Any]:
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer )
                self.assertNotEqual(vocab_size , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
                added_toks = tokenizer.add_tokens(new_toks )
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer )
                self.assertNotEqual(vocab_size_2 , 0 )
                self.assertEqual(vocab_size , vocab_size_2 )
                self.assertEqual(added_toks , len(new_toks ) )
                self.assertEqual(all_size_2 , all_size + len(new_toks ) )
                tokens = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                new_toks_2 = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2 )
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer )
                self.assertNotEqual(vocab_size_3 , 0 )
                self.assertEqual(vocab_size , vocab_size_3 )
                self.assertEqual(added_toks_2 , len(new_toks_2 ) )
                self.assertEqual(all_size_3 , all_size_2 + len(new_toks_2 ) )
                tokens = tokenizer.encode(
                    """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=False )
                self.assertGreaterEqual(len(tokens ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def test_pickle_subword_regularization_tokenizer(self ) -> Dict:
        """simple docstring"""
        pass
    def test_subword_regularization_tokenizer(self ) -> Tuple:
        """simple docstring"""
        pass
    def test_full_tokenizer(self ) -> List[Any]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("""This is a test""" )
        # fmt: off
        self.assertListEqual(tokens , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        # fmt: off
        self.assertListEqual(ids , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
    def test_tokenizer_integration(self ) -> Dict:
        """simple docstring"""
        sequences = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
        expected_encoding = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=sequences , )
| 25 |
"""simple docstring"""
def xnor_gate(input_1: int ,input_2: int ) -> int:
    return 1 if input_1 == input_2 else 0
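# Truth table implemented above (added for reference):
#   input_1 input_2 | output
#      0       0    |   1
#      0       1    |   0
#      1       0    |   0
#      1       1    |   1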
def test_xnor_gate() -> None:
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''yolos'''
    def __init__(self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="""gelu""" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=[5_12, 8_64] , patch_size=16 , num_channels=3 , qkv_bias=True , num_detection_tokens=1_00 , use_mid_position_embeddings=True , auxiliary_loss=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        """simple docstring"""
        return 1E-4
    @property
    def default_onnx_opset(self ) -> int:
        """simple docstring"""
        return 12
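# Hedged usage sketch (added; not part of the original file): the ONNX config
# above is typically instantiated from a model config and queried for its
# export metadata, e.g.
#
#     onnx_config = YolosOnnxConfig(YolosConfig())
#     onnx_config.inputs               # OrderedDict describing "pixel_values" axes
#     onnx_config.atol_for_validation  # 1e-4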
| 25 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    env_level_str = os.getenv("""DATASETS_VERBOSITY""" , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def _get_library_name():
    return __name__.split(""".""" )[0]
def _get_library_root_logger():
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger():
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger():
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger(name = None ):
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity():
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity ):
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    return set_verbosity(INFO )
def set_verbosity_warning():
    return set_verbosity(WARNING )
def set_verbosity_debug():
    return set_verbosity(DEBUG )
def set_verbosity_error():
    return set_verbosity(ERROR )
def disable_propagation():
    _get_library_root_logger().propagate = False
def enable_propagation():
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """simple docstring"""
    def __init__(self , *args , **kwargs ) -> int:  # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__(self ) -> int:
        """simple docstring"""
        return iter(self._iterator )
    def __getattr__(self , _ ) -> int:
        """simple docstring"""
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__(self ) -> Dict:
        """simple docstring"""
        return self
    def __exit__(self , type_ , value , traceback ) -> str:
        """simple docstring"""
        return
_tqdm_active = True
class _tqdm_cls:
    """simple docstring"""
    def __call__(self , *args , disable=False , **kwargs ) -> List[Any]:
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock(self , *args , **kwargs ) -> int:
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock(self ) -> Optional[Any]:
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled():
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
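# Hedged usage sketch (added; assuming this module is importable as
# `datasets.utils.logging`, as in the datasets library):
#
#     from datasets.utils import logging as ds_logging
#     ds_logging.set_verbosity_info()      # raise the library log level
#     ds_logging.disable_progress_bar()    # replace tqdm bars with no-ops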
| 25 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float ,y: float ,max_step: int ) -> float:
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
return step / (max_step - 1)
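# Worked example (added for clarity): for x = y = 0 the sequence never
# escapes, the loop runs all max_step iterations, and the function returns
# (max_step - 1) / (max_step - 1) = 1.0, which both coloring functions below
# map to black.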
def get_black_and_white_rgb(distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance ,1 ,1 ) )
def get_image(image_width: int = 800 ,image_height: int = 600 ,figure_center_x: float = -0.6 ,figure_center_y: float = 0 ,figure_width: float = 3.2 ,max_step: int = 50 ,use_distance_color_coding: bool = True ,) -> Image.Image:
    img = Image.new("""RGB""" ,(image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x ,figure_y ,max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 25 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost
SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
| 25 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase ):
    """simple docstring"""
    @property
    def gpu_provider(self ) -> Optional[int]:
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options(self ) -> Union[str, Any]:
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inpainting(self ) -> Optional[Any]:
        """simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 2_55:2_58, -1]
        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def test_inpainting_k_lms(self ) -> List[str]:
        """simple docstring"""
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            """runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 2_55:2_58, -1]
        assert images.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 25 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="""dataset""" ) ,"""r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim ,depths=depths ,num_heads=num_heads ,window_size=window_size ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
    config = UperNetConfig(
        backbone_config=backbone_config ,auxiliary_in_channels=auxiliary_in_channels ,num_labels=num_labels ,id2label=id2label ,label2id=label2id ,)
    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct ,old ,new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v(state_dict ,backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
# fmt: on
def correct_unfold_reduction_order(x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel ,4 ,in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(out_channel ,in_channel )
    return x
def reverse_correct_unfold_reduction_order(x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel ,in_channel // 4 ,4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(out_channel ,in_channel )
    return x
def correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(4 ,in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order(x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 ,4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(in_channel )
    return x
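# Worked example of the reordering above (added): for an 8-element norm
# weight, correct_unfold_norm_order maps indices (0,1,2,3,4,5,6,7) to
# (0,4,2,6,1,5,3,7); the reverse_* variant applies the inverse mapping, so
# the two undo each other (for length 8 the permutation happens to be its
# own inverse).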
def convert_upernet_checkpoint(model_name ,pytorch_dump_folder_path ,push_to_hub ):
    model_name_to_url = {
        """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
        """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
        """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
        """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location="""cpu""" ,file_name=model_name )[
        """state_dict"""
    ]
    for name, param in state_dict.items():
        print(name ,param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" ,"""batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    read_in_q_k_v(state_dict ,config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url ,stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 1 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = 2_5_6
# Modulus to hash a string
UpperCAmelCase__ : str = 1_0_0_0_0_0_3
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = len(_snake_case )
if p_len > t_len:
return False
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : str = 1
# Calculate the hashes of the pattern and of the first window of the text
for i in range(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
SCREAMING_SNAKE_CASE__ : Tuple = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
SCREAMING_SNAKE_CASE__ : int = (modulus_power * alphabet_size) % modulus
for i in range(0 ,t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Update the rolling hash (see https://en.wikipedia.org/wiki/Rolling_hash)
SCREAMING_SNAKE_CASE__ : Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
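# Editor's note (hedged, de-obfuscated sketch): with all variable names
# collapsed above, the rolling-hash update is hard to read. Assuming the
# collapsed names stand for `p_hash`, `text_hash` and `modulus_power`, the
# window slide drops the leading character and appends the trailing one:
def _rolling_hash_demo() -> None:
    base, mod = 256, 1_000_003  # same alphabet size and modulus as above
    text, p_len = "abcd", 2
    h = (ord(text[0]) * base + ord(text[1])) % mod  # hash of "ab"
    power = pow(base, p_len - 1, mod)  # weight of the leading character
    for i in range(len(text) - p_len):
        h = ((h - ord(text[i]) * power) * base + ord(text[i + p_len])) % mod
        # h now hashes text[i + 1 : i + 1 + p_len]: first "bc", then "cd"
        assert h == (ord(text[i + 1]) * base + ord(text[i + 2])) % mod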
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : int = """abc1abc12"""
SCREAMING_SNAKE_CASE__ : List[Any] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
SCREAMING_SNAKE_CASE__ : Any = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(_snake_case ,_snake_case ) and not rabin_karp(_snake_case ,_snake_case )
# Test 2)
SCREAMING_SNAKE_CASE__ : List[Any] = """ABABX"""
SCREAMING_SNAKE_CASE__ : Optional[int] = """ABABZABABYABABX"""
assert rabin_karp(_snake_case ,_snake_case )
# Test 3)
SCREAMING_SNAKE_CASE__ : Any = """AAAB"""
SCREAMING_SNAKE_CASE__ : int = """ABAAAAAB"""
assert rabin_karp(_snake_case ,_snake_case )
# Test 4)
SCREAMING_SNAKE_CASE__ : str = """abcdabcy"""
SCREAMING_SNAKE_CASE__ : int = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(_snake_case ,_snake_case )
# Test 5)
SCREAMING_SNAKE_CASE__ : int = """Lü"""
SCREAMING_SNAKE_CASE__ : str = """Lüsai"""
assert rabin_karp(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Any = """Lue"""
assert not rabin_karp(_snake_case ,_snake_case )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 25 |
"""simple docstring"""
import math
import unittest
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
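# Editor's note (hedged): the trial-division loop above relies on the fact
# that every prime p > 3 satisfies p ≡ ±1 (mod 6); once multiples of 2 and 3
# are ruled out, only candidate divisors 6k - 1 and 6k + 1 (checked as i and
# i + 2 for i = 5, 11, 17, ...) up to sqrt(number) remain. Worked example
# for 97: sqrt(97) ≈ 9.8, so only i = 5 (and i + 2 = 7) are tried; neither
# divides 97, hence 97 is reported prime.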
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCAmelCase_ (datasets.BeamBasedBuilder ):
"""simple docstring"""
def __magic_name__ (self ) -> str:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""content""": datasets.Value("""string""" )} ) , supervised_keys=SCREAMING_SNAKE_CASE__ , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_dummy_examples()} )]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (datasets.BeamBasedBuilder ):
"""simple docstring"""
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
return datasets.DatasetInfo(
features=datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) , supervised_keys=SCREAMING_SNAKE_CASE__ , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""examples""": get_test_nested_examples()} )
]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def lowercase_ ( ):
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
@require_beam
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
SCREAMING_SNAKE_CASE__ : str = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , SCREAMING_SNAKE_CASE__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(dset["""train"""][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
import apache_beam as beam
SCREAMING_SNAKE_CASE__ : Optional[Any] = beam.io.parquetio.WriteToParquet
SCREAMING_SNAKE_CASE__ : int = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ : Dict = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ , beam_runner="""DirectRunner""" )
with patch("""apache_beam.io.parquetio.WriteToParquet""" ) as write_parquet_mock:
SCREAMING_SNAKE_CASE__ : str = partial(SCREAMING_SNAKE_CASE__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"""content""": datasets.Value("""string""" )} ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , SCREAMING_SNAKE_CASE__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , SCREAMING_SNAKE_CASE__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["""train"""]["""content"""] ) , sorted(["""foo""", """bar""", """foobar"""] ) )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
@require_beam
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ : Tuple = DummyBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
SCREAMING_SNAKE_CASE__ : Dict = NestedBeamDataset(cache_dir=SCREAMING_SNAKE_CASE__ , beam_runner="""DirectRunner""" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"""a""": datasets.Sequence({"""b""": datasets.Value("""string""" )} )} ) )
SCREAMING_SNAKE_CASE__ : Any = builder.as_dataset()
self.assertEqual(dset["""train"""].num_rows , SCREAMING_SNAKE_CASE__ )
self.assertEqual(dset["""train"""].info.splits["""train"""].num_examples , SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(dset["""train"""][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["""train"""][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , builder.name , """default""" , """0.0.0""" , """dataset_info.json""" ) ) )
del dset
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
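# Editor's note (hedged, de-obfuscated restatement of the function above):
# three pointers each track the smallest ugly number whose product with 2, 3
# or 5 has not been emitted yet; advancing every matching pointer avoids
# duplicates such as 6 = 2 * 3 = 3 * 2.
def _ugly_numbers_sketch(n: int) -> int:
    ugly = [1]
    i2 = i3 = i5 = 0
    while len(ugly) < n:
        nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
        ugly.append(nxt)
        if nxt == ugly[i2] * 2:
            i2 += 1
        if nxt == ugly[i3] * 3:
            i3 += 1
        if nxt == ugly[i5] * 5:
            i5 += 1
    return ugly[-1]

# _ugly_numbers_sketch(10) -> 12  (sequence: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12)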
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# Load configuration defined in the metadata file
with open(_snake_case ) as metadata_file:
SCREAMING_SNAKE_CASE__ : Dict = json.load(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = LukeConfig(use_entity_aware_attention=_snake_case ,**metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
SCREAMING_SNAKE_CASE__ : int = torch.load(_snake_case ,map_location="""cpu""" )
# Load the entity vocab file
SCREAMING_SNAKE_CASE__ : Dict = load_entity_vocab(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
SCREAMING_SNAKE_CASE__ : int = AddedToken("""<ent>""" ,lstrip=_snake_case ,rstrip=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken("""<ent2>""" ,lstrip=_snake_case ,rstrip=_snake_case )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(_snake_case )
with open(os.path.join(_snake_case ,LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) ,"""w""" ) as f:
json.dump(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : int = LukeTokenizer.from_pretrained(_snake_case )
# Initialize the embeddings of the special tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict["""embeddings.word_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : str = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
SCREAMING_SNAKE_CASE__ : List[str] = f'''encoder.layer.{layer_index}.attention.self.'''
SCREAMING_SNAKE_CASE__ : int = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[str] = state_dict[prefix + matrix_name]
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
SCREAMING_SNAKE_CASE__ : Any = state_dict["""entity_embeddings.entity_embeddings.weight"""]
SCREAMING_SNAKE_CASE__ : Dict = entity_emb[entity_vocab["""[MASK]"""]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = LukeModel(config=_snake_case ).eval()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = model.load_state_dict(_snake_case ,strict=_snake_case )
if not (len(_snake_case ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f'''Missing keys {', '.join(_snake_case )}. Expected only missing embeddings.position_ids''' )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' )
# Check outputs
SCREAMING_SNAKE_CASE__ : Dict = LukeTokenizer.from_pretrained(_snake_case ,task="""entity_classification""" )
SCREAMING_SNAKE_CASE__ : str = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = (39, 42)
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(_snake_case ,entity_spans=[span] ,add_prefix_space=_snake_case ,return_tensors="""pt""" )
SCREAMING_SNAKE_CASE__ : Any = model(**_snake_case )
# Verify word hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ : Any = torch.Size((1, 42, 1_024) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 42, 768) )
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_snake_case ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 1, 1_024) )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
SCREAMING_SNAKE_CASE__ : Dict = torch.Size((1, 1, 768) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,_snake_case ,atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(_snake_case ) )
model.save_pretrained(_snake_case )
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : str = {}
with open(_snake_case ,"""r""" ,encoding="""utf-8""" ) as f:
for index, line in enumerate(_snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = line.rstrip().split("""\t""" )
SCREAMING_SNAKE_CASE__ : Dict = index
return entity_vocab
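# Editor's note (hedged): the loader above assumes a two-column TSV with one
# entity per line, roughly "<title>\t<count>", and maps each title to its
# 0-based line index (the second column is read but discarded), e.g. a file
# starting with "[PAD]\t0" yields {"[PAD]": 0, ...}.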
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 25 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
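# Editor's note (hedged usage sketch): upstream this configuration class is
# `ASTConfig`; constructed with no arguments it carries the defaults encoded
# in the signature above, e.g.
#
#     config = ASTConfig()   # 768 hidden size, 12 layers, 12 heads,
#                            # 128 mel bins, max_length of 1024 frames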
| 25 | 1 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ):
raise TypeError("""Input value must be an 'int' type""" )
SCREAMING_SNAKE_CASE__ : int = 0
while number:
position += 1
number >>= 1
return position
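# Editor's note (hedged): the loop above returns the 1-indexed position of
# the most significant set bit (1 -> 1, 2 -> 2, 8 -> 4) and 0 for an input
# of zero, which for non-negative ints is exactly the builtin
# `int.bit_length()`.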
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase_ ( _snake_case ):
# encoder.embeddings tensors are copied twice in the original FLAVA checkpoint
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
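# Editor's note (hedged): despite its name, the helper above sums parameter
# *values* rather than counting them, skipping "encoder.embeddings" keys
# because those tensors appear twice in the original FLAVA checkpoint; the
# sum acts as a cheap checksum, compared after conversion via the
# `torch.allclose(..., atol=1E-3)` assertion below.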
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Any = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" )
SCREAMING_SNAKE_CASE__ : Tuple = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
return upgrade
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig()
SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case )
hf_model.load_state_dict(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict()
SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case )
SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE__ : Tuple = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_slow.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
processor_fast.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : str = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE__ : List[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ : Any = processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = """lower newer"""
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(text=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer(SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = """lower newer"""
SCREAMING_SNAKE_CASE__ : int = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : str = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE__ ):
processor()
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : List[str] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE__ : Dict = processor.batch_decode(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_image_processor()
SCREAMING_SNAKE_CASE__ : int = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : List[str] = AlignProcessor(tokenizer=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = """lower newer"""
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE__ : List[str] = processor(text=SCREAMING_SNAKE_CASE__ , images=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 25 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out because RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is the final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 25 | 1 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ : List[Any] = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ : Any = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase__ : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
UpperCAmelCase__ : Optional[int] = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (even though there is no training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (even though there is no training script for these models yet)
# `norm` used in the conversion script (though not used in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : str = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f'''config.{attribute}''' in modeling_source
or f'''getattr(config, "{attribute}"''' in modeling_source
or f'''getattr(self.config, "{attribute}"''' in modeling_source
):
SCREAMING_SNAKE_CASE__ : Optional[int] = True
# Deal with multi-line cases
elif (
re.search(
Rf'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' ,_snake_case ,)
is not None
):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
SCREAMING_SNAKE_CASE__ : str = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
SCREAMING_SNAKE_CASE__ : int = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
SCREAMING_SNAKE_CASE__ : List[str] = True
if not attribute_used:
SCREAMING_SNAKE_CASE__ : List[str] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
SCREAMING_SNAKE_CASE__ : str = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
SCREAMING_SNAKE_CASE__ : Any = True
elif attribute.endswith("""_token_id""" ):
SCREAMING_SNAKE_CASE__ : Dict = True
# configuration class specific cases
if not case_allowed:
SCREAMING_SNAKE_CASE__ : List[str] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ ,[] )
SCREAMING_SNAKE_CASE__ : str = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
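# Editor's note (hedged illustration): the `re.search` branch above exists
# because black may wrap long calls across several lines, e.g.
#
#     value = getattr(
#         self.config, "hidden_size"
#     )
#
# which the plain substring checks would miss; the [ \t\v\n\r\f]* character
# class tolerates any whitespace, including newlines, between `getattr(`,
# the config reference and the quoted attribute name.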
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
SCREAMING_SNAKE_CASE__ : str = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
SCREAMING_SNAKE_CASE__ : List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
SCREAMING_SNAKE_CASE__ : Tuple = {}
if len(config_class.attribute_map ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
SCREAMING_SNAKE_CASE__ : Any = inspect.getsourcefile(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.dirname(_snake_case )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [os.path.join(_snake_case ,_snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith("""modeling_""" )]
# Get the source code strings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for path in modeling_paths:
if os.path.isfile(_snake_case ):
with open(_snake_case ) as fp:
modeling_sources.append(fp.read() )
SCREAMING_SNAKE_CASE__ : Any = []
for config_param, default_value in zip(_snake_case ,_snake_case ):
# `attributes` here is all the variant names for `config_param`
SCREAMING_SNAKE_CASE__ : Dict = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
unused_attributes.append(attributes[0] )
return sorted(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
SCREAMING_SNAKE_CASE__ : List[str] = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) ,lambda _snake_case : inspect.isclass(_snake_case )
and issubclass(_snake_case ,_snake_case )
and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) ,)
]
for config_class in config_classes_in_module:
SCREAMING_SNAKE_CASE__ : Optional[int] = check_config_attributes_being_used(_snake_case )
if len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = unused_attributes
if len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f'''{name}: {attributes}\n'''
raise ValueError(_snake_case )
if __name__ == "__main__":
check_config_attributes()
| 25 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
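# Editor's note (hedged): the constants above appear to be precomputed
# denoising-timestep schedules of varying lengths (27, 40, 50, 100 and 185
# steps), each a strictly decreasing subset of the 0-999 training timesteps
# ending at 0; a sampler would iterate over one such list instead of
# stepping through all 1000 timesteps.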
| 25 | 1 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
UpperCAmelCase__ : str = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
UpperCAmelCase__ : str = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
UpperCAmelCase__ : Optional[int] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ (datasets.Metric ):
"""simple docstring"""
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=False ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = compute_bleu(
reference_corpus=SCREAMING_SNAKE_CASE__ , translation_corpus=SCREAMING_SNAKE_CASE__ , max_order=SCREAMING_SNAKE_CASE__ , smooth=SCREAMING_SNAKE_CASE__ )
        bleu , precisions , bp , ratio , translation_length , reference_length = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
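# Sketch (assumption, not taken from this metric): the `brevity_penalty` value
# returned above follows the standard BLEU definition
# BP = 1 if c > r else exp(1 - r / c), where c is the total translation length
# and r the total reference length.
import math


def brevity_penalty(translation_length, reference_length):
    if translation_length > reference_length:
        return 1.0
    return math.exp(1 - reference_length / max(translation_length, 1))


# brevity_penalty(9, 10) ~= 0.8948 ; brevity_penalty(11, 10) == 1.0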
| 25 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel  # noqa: F401 (mangled name kept below)
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = SavedModel()
SCREAMING_SNAKE_CASE__ : Dict = []
with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
        SCREAMING_SNAKE_CASE__ : Any = json.load(f )["""opsets"""]
for i in range(1 ,opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )
with open(_snake_case ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list for deterministic output
SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )
if strict and len(_snake_case ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + """\n""".join(incompatible_ops ) )
elif len(_snake_case ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_snake_case ,sep="""\n""" )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
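# Illustrative programmatic use (assumed path; the argparse block above is the
# intended entry point). The `.pb` file is the protobuf inside a SavedModel
# export directory:
#
#   onnx_compliancy("path/to/saved_model/saved_model.pb", False, 12)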
| 25 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase__ : Union[str, Any] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,_snake_case=None ,):
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : str = np.where(input_ids != config.pad_token_id ,1 ,0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = np.where(decoder_input_ids != config.pad_token_id ,1 ,0 )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
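# Quick standalone check (sketch) of the masking convention used above,
# assuming pad_token_id == 1: real tokens map to 1 and padding maps to 0.
#
#   >>> np.where(np.array([[5, 7, 1, 1]]) != 1, 1, 0)
#   array([[1, 1, 0, 0]])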
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=0.02 , ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Dict = seq_length
SCREAMING_SNAKE_CASE__ : List[str] = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = eos_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : Any = initializer_range
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
SCREAMING_SNAKE_CASE__ : Dict = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 )
SCREAMING_SNAKE_CASE__ : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Dict = prepare_blenderbot_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, inputs_dict
def __magic_name__ (self ) -> int:
"""simple docstring"""
        config , inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = 20
SCREAMING_SNAKE_CASE__ : int = model_class_name(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids , decoder_attention_mask = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
SCREAMING_SNAKE_CASE__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Tuple = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ : str = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = 20
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class_name(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.encode(inputs_dict["""input_ids"""] )
        decoder_input_ids , decoder_attention_mask = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
SCREAMING_SNAKE_CASE__ : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
SCREAMING_SNAKE_CASE__ : List[str] = model.decode(
decoder_input_ids[:, -1:] , SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , decoder_position_ids=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.decode(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Dict = 99
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : List[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
        config , input_ids , batch_size = self._get_config_and_data()
SCREAMING_SNAKE_CASE__ : str = FlaxBlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = lm_model(input_ids=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
SCREAMING_SNAKE_CASE__ : Dict = FlaxBlenderbotForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
SCREAMING_SNAKE_CASE__ : int = lm_model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
        SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
SCREAMING_SNAKE_CASE__ : List[Any] = shift_tokens_right(SCREAMING_SNAKE_CASE__ , 1 , 2 )
        SCREAMING_SNAKE_CASE__ : List[Any] = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.float32 ).sum()
        SCREAMING_SNAKE_CASE__ : Optional[Any] = np.equal(SCREAMING_SNAKE_CASE__ , 1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(SCREAMING_SNAKE_CASE__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCAmelCase_ (a__ , unittest.TestCase , a__ ):
"""simple docstring"""
__UpperCamelCase : Any = True
__UpperCamelCase : Union[str, Any] = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
__UpperCamelCase : Union[str, Any] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = FlaxBlenderbotModelTester(self )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> int:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
@jax.jit
def encode_jitted(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ):
return model.encode(input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
with self.subTest("""JIT Enabled""" ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ : Any = encode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
SCREAMING_SNAKE_CASE__ : Any = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return model.decode(
decoder_input_ids=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , encoder_outputs=SCREAMING_SNAKE_CASE__ , )
with self.subTest("""JIT Enabled""" ):
SCREAMING_SNAKE_CASE__ : List[Any] = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ : Optional[int] = decode_jitted(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class_name.from_pretrained("""facebook/blenderbot-400M-distill""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE__ : Any = np.ones((1, 1) ) * model.config.eos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = model(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
@unittest.skipUnless(jax_device != """cpu""" , """3B test too slow on CPU.""" )
@slow
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = {"""num_beams""": 1, """early_stopping""": True, """min_length""": 15, """max_length""": 25}
SCREAMING_SNAKE_CASE__ : Any = {"""skip_special_tokens""": True, """clean_up_tokenization_spaces""": True}
SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained("""facebook/blenderbot-3B""" , from_pt=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = BlenderbotTokenizer.from_pretrained("""facebook/blenderbot-3B""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ["""Sam"""]
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors="""jax""" )
SCREAMING_SNAKE_CASE__ : Dict = model.generate(**SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = """Sam is a great name. It means \"sun\" in Gaelic."""
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
assert generated_txt[0].strip() == tgt_text
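# Minimal NumPy sketch (assumption) of the `shift_tokens_right` behaviour the
# tests above rely on: shift right, prepend `decoder_start_token_id`, and map
# any -100 placeholders back to `pad_token_id`. The helper name is hypothetical.
import numpy as np


def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return np.where(shifted == -100, pad_token_id, shifted)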
| 25 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ : List[Any] = logging.getLogger()
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case )
Path(_snake_case ).open("""w""" ).writelines(_snake_case )
UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random'
UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart'
UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
        self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : Any = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] )
SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : List[Any] = F'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
SCREAMING_SNAKE_CASE__ : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
            expected_strings.extend(ROUGE_KEYS )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
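# Sketch (hypothetical helper, not taken from run_eval_search): how a grid
# string such as "num_beams=1:2 length_penalty=0.9:1.0" can expand into the
# hyperparameter combinations that the search above iterates over.
from itertools import product


def parse_search_string(search):
    keys, value_lists = [], []
    for part in search.split():
        key, values = part.split("=")
        keys.append(key)
        value_lists.append(values.split(":"))
    return [dict(zip(keys, combo)) for combo in product(*value_lists)]


# parse_search_string("num_beams=1:2 length_penalty=0.9:1.0") -> 4 kwargs dicts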
| 25 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel  # noqa: F401 (mangled name kept below)
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ = "auto" ) -> List[str]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ : Dict = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 5_12 , SCREAMING_SNAKE_CASE__ = 5_12 , SCREAMING_SNAKE_CASE__ = 50 , SCREAMING_SNAKE_CASE__ = 7.5 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : str = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[str] = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(SCREAMING_SNAKE_CASE__ )}.''' )
# get prompt text embeddings
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : str = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
SCREAMING_SNAKE_CASE__ : str = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
SCREAMING_SNAKE_CASE__ : Dict = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed , seq_len , _ = text_embeddings.shape
SCREAMING_SNAKE_CASE__ : int = text_embeddings.repeat(1 , SCREAMING_SNAKE_CASE__ , 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_embeddings.view(bs_embed * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE__ : Optional[int] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE__ : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE__ : str = [""""""]
elif type(SCREAMING_SNAKE_CASE__ ) is not type(SCREAMING_SNAKE_CASE__ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(SCREAMING_SNAKE_CASE__ )} !='''
F''' {type(SCREAMING_SNAKE_CASE__ )}.''' )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [negative_prompt]
elif batch_size != len(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(SCREAMING_SNAKE_CASE__ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = negative_prompt
SCREAMING_SNAKE_CASE__ : Tuple = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(
SCREAMING_SNAKE_CASE__ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" , )
SCREAMING_SNAKE_CASE__ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE__ : Any = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE__ : Any = uncond_embeddings.repeat(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1 )
SCREAMING_SNAKE_CASE__ : Tuple = uncond_embeddings.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE__ : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE__ : str = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
SCREAMING_SNAKE_CASE__ : Optional[int] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE__ ).to(self.device )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device="""cpu""" , dtype=SCREAMING_SNAKE_CASE__ ).to(
self.device )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.randn(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.randn(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=SCREAMING_SNAKE_CASE__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = latents_reference.to(self.device )
SCREAMING_SNAKE_CASE__ : Any = latents.to(self.device )
            # This is the key part of the pipeline where we
            # try to ensure that the generated images with the same seed
            # but different sizes actually result in similar images
SCREAMING_SNAKE_CASE__ : List[str] = (latents_shape[3] - latents_shape_reference[3]) // 2
SCREAMING_SNAKE_CASE__ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
SCREAMING_SNAKE_CASE__ : Dict = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
SCREAMING_SNAKE_CASE__ : Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
SCREAMING_SNAKE_CASE__ : int = 0 if dx < 0 else dx
SCREAMING_SNAKE_CASE__ : Tuple = 0 if dy < 0 else dy
SCREAMING_SNAKE_CASE__ : List[Any] = max(-dx , 0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = max(-dy , 0 )
SCREAMING_SNAKE_CASE__ : List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE__ : Optional[int] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ : int = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ : Tuple = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ : Optional[Any] = eta
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE__ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# predict the noise residual
SCREAMING_SNAKE_CASE__ : Dict = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ ).sample
# perform guidance
if do_classifier_free_guidance:
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ : int = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = 1 / 0.18215 * latents
SCREAMING_SNAKE_CASE__ : Any = self.vae.decode(SCREAMING_SNAKE_CASE__ ).sample
SCREAMING_SNAKE_CASE__ : Dict = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor(self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) , return_tensors="""pt""" ).to(
self.device )
            image , has_nsfw_concept = self.safety_checker(
images=SCREAMING_SNAKE_CASE__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = None
if output_type == "pil":
SCREAMING_SNAKE_CASE__ : Dict = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=SCREAMING_SNAKE_CASE__ , nsfw_content_detected=SCREAMING_SNAKE_CASE__ )
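# Standalone sketch of the classifier-free guidance update used above; see
# eq. (2) of https://arxiv.org/abs/2205.11487. A guidance_scale of 1.0 is
# equivalent to no guidance.
import torch


def classifier_free_guidance(noise_pred_uncond, noise_pred_text, guidance_scale):
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)


# classifier_free_guidance(torch.zeros(1), torch.ones(1), 7.5) -> tensor([7.5000])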
| 25 |
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
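# Sketch (assumption): these placeholder mappings are applied as plain string
# replacements when the documentation notebooks are generated, e.g.:
#
#   for placeholder, value in black_avoid_patterns.items():
#       source = source.replace(placeholder, value)
#
# `black_avoid_patterns` is a hypothetical name for the dict defined above.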
| 25 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : Optional[Any] = {
'7B': 1_1_0_0_8,
'13B': 1_3_8_2_4,
'30B': 1_7_9_2_0,
'65B': 2_2_0_1_6,
'70B': 2_8_6_7_2,
}
UpperCAmelCase__ : List[str] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def lowercase_ ( _snake_case ,_snake_case=1 ,_snake_case=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
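# Worked example of the rounding above (sketch): for n = 4_096 with
# ffn_dim_multiplier = 1 and multiple_of = 2_56,
#   int(8 * 4_096 / 3) = 10_922, rounded up to a multiple of 2_56 -> 11_008,
# which matches the '7B' entry (1_1_0_0_8) in the table at the top of the file.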
def lowercase_ ( _snake_case ):
with open(_snake_case ,"""r""" ) as f:
        return json.load(f )
def lowercase_ ( _snake_case ,_snake_case ):
with open(_snake_case ,"""w""" ) as f:
        json.dump(_snake_case ,f )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=True ):
os.makedirs(_snake_case ,exist_ok=_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = os.path.join(_snake_case ,"""tmp""" )
os.makedirs(_snake_case ,exist_ok=_snake_case )
SCREAMING_SNAKE_CASE__ : Any = read_json(os.path.join(_snake_case ,"""params.json""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = NUM_SHARDS[model_size]
SCREAMING_SNAKE_CASE__ : Optional[int] = params["""n_layers"""]
SCREAMING_SNAKE_CASE__ : str = params["""n_heads"""]
SCREAMING_SNAKE_CASE__ : List[str] = n_heads // num_shards
SCREAMING_SNAKE_CASE__ : List[str] = params["""dim"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = dim // n_heads
SCREAMING_SNAKE_CASE__ : str = 10000.0
    SCREAMING_SNAKE_CASE__ : List[str] = 1.0 / (base ** (torch.arange(0 ,dims_per_head ,2 ).float() / dims_per_head))
if "n_kv_heads" in params:
SCREAMING_SNAKE_CASE__ : List[str] = params["""n_kv_heads"""] # for GQA / MQA
SCREAMING_SNAKE_CASE__ : str = n_heads_per_shard // num_key_value_heads
SCREAMING_SNAKE_CASE__ : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
SCREAMING_SNAKE_CASE__ : List[Any] = n_heads
SCREAMING_SNAKE_CASE__ : List[Any] = n_heads_per_shard
SCREAMING_SNAKE_CASE__ : List[Any] = dim
# permute for sliced rotary
def permute(_snake_case ,_snake_case=n_heads ,_snake_case=dim ,_snake_case=dim ):
return w.view(_snake_case ,dima // n_heads // 2 ,2 ,_snake_case ).transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(os.path.join(_snake_case ,"""consolidated.00.pth""" ) ,map_location="""cpu""" )
else:
# Sharded
SCREAMING_SNAKE_CASE__ : Tuple = [
torch.load(os.path.join(_snake_case ,f'''consolidated.{i:02d}.pth''' ) ,map_location="""cpu""" )
for i in range(_snake_case )
]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : List[Any] = {"""weight_map""": {}}
for layer_i in range(_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE__ : Dict = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
SCREAMING_SNAKE_CASE__ : int = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
SCREAMING_SNAKE_CASE__ : int = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_snake_case ,_snake_case ,_snake_case )
for i in range(_snake_case )
] ,dim=0 ,).reshape(_snake_case ,_snake_case ) )
SCREAMING_SNAKE_CASE__ : List[Any] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
_snake_case ,_snake_case ,_snake_case )
for i in range(_snake_case )
] ,dim=0 ,).reshape(_snake_case ,_snake_case ) ,_snake_case ,_snake_case ,_snake_case ,)
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
_snake_case ,_snake_case ,_snake_case )
for i in range(_snake_case )
] ,dim=0 ,).reshape(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_snake_case )] ,dim=1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_snake_case )] ,dim=0 )
SCREAMING_SNAKE_CASE__ : Dict = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_snake_case )] ,dim=1 )
SCREAMING_SNAKE_CASE__ : Any = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_snake_case )] ,dim=0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inv_freq
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[int] = filename
param_count += v.numel()
torch.save(_snake_case ,os.path.join(_snake_case ,_snake_case ) )
SCREAMING_SNAKE_CASE__ : int = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
SCREAMING_SNAKE_CASE__ : Dict = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(_snake_case )] ,dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(_snake_case )] ,dim=0 ),
}
for k, v in state_dict.items():
SCREAMING_SNAKE_CASE__ : Tuple = filename
param_count += v.numel()
torch.save(_snake_case ,os.path.join(_snake_case ,_snake_case ) )
# Write configs
SCREAMING_SNAKE_CASE__ : str = {"""total_size""": param_count * 2}
write_json(_snake_case ,os.path.join(_snake_case ,"""pytorch_model.bin.index.json""" ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
SCREAMING_SNAKE_CASE__ : Tuple = params["""multiple_of"""] if """multiple_of""" in params else 256
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LlamaConfig(
hidden_size=_snake_case ,intermediate_size=compute_intermediate_size(_snake_case ,_snake_case ,_snake_case ) ,num_attention_heads=params["""n_heads"""] ,num_hidden_layers=params["""n_layers"""] ,rms_norm_eps=params["""norm_eps"""] ,num_key_value_heads=_snake_case ,)
config.save_pretrained(_snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
    SCREAMING_SNAKE_CASE__ : List[Any] = LlamaForCausalLM.from_pretrained(_snake_case ,torch_dtype=torch.float16 ,low_cpu_mem_usage=_snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(_snake_case ,safe_serialization=_snake_case )
shutil.rmtree(_snake_case )
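# Sketch (assumption): the nested `permute` defined in the function above
# converts Meta's interleaved rotary layout (q0, q1, q2, q3, ...) into the
# half-split layout (q0, q2, ..., q1, q3, ...) that the HF rotary embedding
# expects. Standalone equivalent:
import torch


def permute_rotary(w, n_heads, dim1, dim2):
    return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)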
def lowercase_ ( _snake_case ,_snake_case ):
# Initialize the tokenizer based on the `spm` model
SCREAMING_SNAKE_CASE__ : int = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer_class(_snake_case )
tokenizer.save_pretrained(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" ,help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" ,)
parser.add_argument(
"""--model_size""" ,choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] ,)
parser.add_argument(
"""--output_dir""" ,help="""Location to write HF model and tokenizer""" ,)
parser.add_argument("""--safe_serialization""" ,type=_snake_case ,help="""Whether or not to save using `safetensors`.""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir ,input_base_path=os.path.join(args.input_dir ,args.model_size ) ,model_size=args.model_size ,safe_serialization=args.safe_serialization ,)
SCREAMING_SNAKE_CASE__ : Any = os.path.join(args.input_dir ,"""tokenizer.model""" )
    write_tokenizer(args.output_dir ,spm_path )
if __name__ == "__main__":
main()
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(_snake_case ,float ):
        raise TypeError("""Input value must be an 'int' type""" )
return bin(_snake_case ).count("""1""" )
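# Alternative sketch: Brian Kernighan's trick clears the lowest set bit on
# each iteration, so it runs in O(number of set bits) instead of O(bit length).
def count_set_bits_kernighan(number: int) -> int:
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count


# count_set_bits_kernighan(0b10110) -> 3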
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Dict = 5
UpperCAmelCase__ : Union[str, Any] = 1_0
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : int = SpeechaTextTokenizer
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : List[Any] = True
def __magic_name__ (self ) -> int:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ : List[str] = sp.SentencePieceProcessor()
spm_model.Load(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(SCREAMING_SNAKE_CASE__ ) )]
SCREAMING_SNAKE_CASE__ : Any = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(self.tmpdirname )
save_json(SCREAMING_SNAKE_CASE__ , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(SCREAMING_SNAKE_CASE__ , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = """<pad>"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_01 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [2_89, 50, 14, 1_74, 3_86] , )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""input_ids""": [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Dict = '''valhalla/s2t_mustc_multilinguial_medium'''
__UpperCamelCase : Tuple = '''C\'est trop cool'''
__UpperCamelCase : Optional[int] = '''Esto es genial'''
@classmethod
def __magic_name__ (cls ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
self.assertIn(SCREAMING_SNAKE_CASE__ , self.tokenizer.all_special_ids )
SCREAMING_SNAKE_CASE__ : str = [ES_CODE, 4, 16_01, 47, 76_47, 2]
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """fr"""
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , SCREAMING_SNAKE_CASE__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = """fr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
SCREAMING_SNAKE_CASE__ : Optional[int] = """es"""
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
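# The assertions above pin down the multilingual prefixing contract. A minimal
# standalone sketch of that contract, assuming the public transformers API and
# the same checkpoint (requires network access; not part of the original test file):
if __name__ == "__main__":
    from transformers import Speech2TextTokenizer

    tok = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
    tok.tgt_lang = "fr"
    ids = tok("C'est trop cool").input_ids
    assert ids[0] == tok.lang_code_to_id["fr"]  # language code is prepended
    assert ids[-1] == tok.eos_token_id  # </s> is appended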
| 25 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
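# A minimal usage sketch for the pipeline above, assuming the "depth-estimation"
# task alias and the public "Intel/dpt-large" checkpoint (requires network access;
# not part of the original module):
if __name__ == "__main__":
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    # "depth" is a PIL image rescaled to 0-255 for viewing; "predicted_depth"
    # holds the raw model tensor before bicubic interpolation back to image size.
    result["depth"].save("depth.png")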
| 25 | 1 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 25 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# precompute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
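# For reference, a minimal stage-I text-to-image sketch mirroring what the slow
# tests above exercise, following the documented diffusers DeepFloyd IF usage
# (the checkpoint is gated on the Hub and fp16 needs a CUDA GPU; not part of
# the original test file):
if __name__ == "__main__":
    from diffusers import IFPipeline

    pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
    pipe.enable_model_cpu_offload()
    prompt_embeds, negative_embeds = pipe.encode_prompt("anime turtle")
    image = pipe(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pil").images[0]
    image.save("if_stage_I.png")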
| 25 | 1 |
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
UpperCAmelCase__ : List[str] = {
'n_samples': 6_4,
'horizon': 3_2,
'num_inference_steps': 2_0,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
UpperCAmelCase__ : Any = 'hopper-medium-v2'
UpperCAmelCase__ : Union[str, Any] = gym.make(env_name)
UpperCAmelCase__ : List[str] = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
UpperCAmelCase__ : str = env.reset()
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[Any] = 1_0_0_0
UpperCAmelCase__ : Union[str, Any] = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
UpperCAmelCase__ : Optional[Any] = pipeline(obs, planning_horizon=3_2)
# execute action in environment
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = env.step(denorm_actions)
UpperCAmelCase__ : Dict = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
f""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
UpperCAmelCase__ : List[Any] = next_observation
except KeyboardInterrupt:
pass
print(f"""Total reward: {total_reward}""")
| 25 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
SCREAMING_SNAKE_CASE__ : int = Accelerator()
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 25 | 1 |
"""simple docstring"""
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
UpperCAmelCase__ : Optional[Any] = get_logger(__name__)
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ = None ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
os.path.join(SCREAMING_SNAKE_CASE__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
SCREAMING_SNAKE_CASE__ : Tuple = Extractor
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
SCREAMING_SNAKE_CASE__ : str = os.path.abspath(SCREAMING_SNAKE_CASE__ )
return os.path.join(self.extract_dir , hash_url_to_filename(SCREAMING_SNAKE_CASE__ ) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(SCREAMING_SNAKE_CASE__ ) and not (os.path.isdir(SCREAMING_SNAKE_CASE__ ) and os.listdir(SCREAMING_SNAKE_CASE__ ))
)
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.extractor.infer_extractor_format(SCREAMING_SNAKE_CASE__ )
if not extractor_format:
return input_path
SCREAMING_SNAKE_CASE__ : Any = self._get_output_path(SCREAMING_SNAKE_CASE__ )
if self._do_extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return output_path
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
@classmethod
@abstractmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
...
class lowerCAmelCase_ (a__ , a__ ):
"""simple docstring"""
__UpperCamelCase : List[bytes] = []
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
with open(SCREAMING_SNAKE_CASE__ , """rb""" ) as f:
return f.read(SCREAMING_SNAKE_CASE__ )
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
SCREAMING_SNAKE_CASE__ : Optional[int] = max(len(SCREAMING_SNAKE_CASE__ ) for cls_magic_number in cls.magic_numbers )
try:
SCREAMING_SNAKE_CASE__ : List[str] = cls.read_magic_number(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except OSError:
return False
return any(magic_number.startswith(SCREAMING_SNAKE_CASE__ ) for cls_magic_number in cls.magic_numbers )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(SCREAMING_SNAKE_CASE__ )
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
def resolved(SCREAMING_SNAKE_CASE__ ) -> str:
return os.path.realpath(os.path.abspath(SCREAMING_SNAKE_CASE__ ) )
def badpath(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ).startswith(SCREAMING_SNAKE_CASE__ )
def badlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> bool:
# Links are interpreted relative to the directory containing the link
SCREAMING_SNAKE_CASE__ : List[str] = resolved(os.path.join(SCREAMING_SNAKE_CASE__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = resolved(SCREAMING_SNAKE_CASE__ )
for finfo in members:
if badpath(finfo.name , SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tarfile.open(SCREAMING_SNAKE_CASE__ )
tar_file.extractall(SCREAMING_SNAKE_CASE__ , members=TarExtractor.safemembers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
tar_file.close()
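# Standalone illustration (not part of the original module) of the check that
# `safemembers` applies above: a member whose resolved path escapes the
# extraction root must be rejected.
def _member_escapes_base(base: str, member_name: str) -> bool:
    """
    >>> _member_escapes_base("/tmp/extract", "../evil.txt")
    True
    >>> _member_escapes_base("/tmp/extract", "data/file.txt")
    False
    """
    resolved_base = os.path.realpath(os.path.abspath(base))
    target = os.path.realpath(os.path.abspath(os.path.join(resolved_base, member_name)))
    return not target.startswith(resolved_base)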
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : List[str] = [b'''\x1F\x8B''']
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
with gzip.open(SCREAMING_SNAKE_CASE__ , """rb""" ) as gzip_file:
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = [
b'''PK\x03\x04''',
b'''PK\x05\x06''', # empty archive
b'''PK\x07\x08''', # spanned archive
]
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(SCREAMING_SNAKE_CASE__ , magic_number=SCREAMING_SNAKE_CASE__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(SCREAMING_SNAKE_CASE__ , """rb""" ) as fp:
SCREAMING_SNAKE_CASE__ : Tuple = _EndRecData(SCREAMING_SNAKE_CASE__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = fp.read(SCREAMING_SNAKE_CASE__ ) # CD is where we expect it to be
if len(SCREAMING_SNAKE_CASE__ ) == sizeCentralDir:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = struct.unpack(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with zipfile.ZipFile(SCREAMING_SNAKE_CASE__ , """r""" ) as zip_file:
zip_file.extractall(SCREAMING_SNAKE_CASE__ )
zip_file.close()
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = [b'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
with lzma.open(SCREAMING_SNAKE_CASE__ ) as compressed_file:
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : str = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = rarfile.RarFile(SCREAMING_SNAKE_CASE__ )
rf.extractall(SCREAMING_SNAKE_CASE__ )
rf.close()
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = [b'''\x28\xb5\x2F\xFD''']
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
SCREAMING_SNAKE_CASE__ : Union[str, Any] = zstd.ZstdDecompressor()
with open(SCREAMING_SNAKE_CASE__ , """rb""" ) as ifh, open(SCREAMING_SNAKE_CASE__ , """wb""" ) as ofh:
dctx.copy_stream(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = [b'''\x42\x5A\x68''']
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
with bza.open(SCREAMING_SNAKE_CASE__ , """rb""" ) as compressed_file:
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Any = [b'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
import pyazr
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with pyazr.SevenZipFile(SCREAMING_SNAKE_CASE__ , """r""" ) as archive:
archive.extractall(SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Tuple = [b'''\x04\x22\x4D\x18''']
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
import lza.frame
with lza.frame.open(SCREAMING_SNAKE_CASE__ , """rb""" ) as compressed_file:
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as extracted_file:
shutil.copyfileobj(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase_ :
"""simple docstring"""
__UpperCamelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __magic_name__ (cls ) -> str:
"""simple docstring"""
return max(
len(SCREAMING_SNAKE_CASE__ )
for extractor in cls.extractors.values()
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(SCREAMING_SNAKE_CASE__ , magic_number_length=SCREAMING_SNAKE_CASE__ )
except OSError:
return b""
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False ) -> bool:
"""simple docstring"""
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[str] = cls.infer_extractor_format(SCREAMING_SNAKE_CASE__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = cls._get_magic_number_max_length()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cls._read_magic_number(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(SCREAMING_SNAKE_CASE__ , magic_number=SCREAMING_SNAKE_CASE__ ):
return extractor_format
@classmethod
def __magic_name__ (cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(SCREAMING_SNAKE_CASE__ ) , exist_ok=SCREAMING_SNAKE_CASE__ )
# Prevent parallel extractions
SCREAMING_SNAKE_CASE__ : Union[str, Any] = str(Path(SCREAMING_SNAKE_CASE__ ).with_suffix(""".lock""" ) )
with FileLock(SCREAMING_SNAKE_CASE__ ):
shutil.rmtree(SCREAMING_SNAKE_CASE__ , ignore_errors=SCREAMING_SNAKE_CASE__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = extractor if extractor != """deprecated""" else extractor_format
else:
SCREAMING_SNAKE_CASE__ : int = cls.extractors[extractor_format]
return extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=SCREAMING_SNAKE_CASE__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(SCREAMING_SNAKE_CASE__ ):
return extractor.extract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
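# A compact standalone sketch (the helper name is illustrative, not part of the
# original API) of the magic-number dispatch the registry above relies on,
# with signatures taken from the extractor classes in this module:
def _sniff_compression_format(path):
    magic_numbers = {
        b"\x1f\x8b": "gzip",
        b"PK\x03\x04": "zip",
        b"\xfd7zXZ\x00": "xz",
        b"BZh": "bz2",
        b"\x28\xb5\x2f\xfd": "zstd",
    }
    # Read just enough bytes to cover the longest known signature.
    with open(path, "rb") as f:
        header = f.read(max(len(magic) for magic in magic_numbers))
    for magic, fmt in magic_numbers.items():
        if header.startswith(magic):
            return fmt
    return None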
| 25 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,):
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ : int = []
# custom device map
if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" )
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
""" We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers(
_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map(
_snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized. """ """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case ,_snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes
SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ : int = get_balanced_memory(
_snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory
SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case )
if isinstance(_snake_case ,_snake_case ):
# check that we don't have any quantized modules on the cpu
SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,):
SCREAMING_SNAKE_CASE__ : Tuple = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(_snake_case )
if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ : List[str] = False
break
if proceed:
# Load bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("""load_in_8bit and load_in_4bit can't both be False""" )
SCREAMING_SNAKE_CASE__ : str = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _snake_case ):
# Create a copy of the model
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] )
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if hasattr(_snake_case ,"""base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def lowercase_ ( _snake_case ):
for m in model.modules():
if isinstance(_snake_case ,bnb.nn.Linearabit ):
return True
return False
def lowercase_ ( _snake_case ):
return next(parameter.parameters() ).device
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case )
SCREAMING_SNAKE_CASE__ : str = param_name
SCREAMING_SNAKE_CASE__ : Dict = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ : List[Any] = False
offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case )
if hasattr(module._parameters[tensor_name] ,"""SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,)
else:
offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case )
offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case )
set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
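# A minimal end-to-end sketch of how this module is typically driven, following
# the accelerate quantization guide (the checkpoint name and weights path are
# placeholders; 8-bit loading additionally requires a CUDA GPU and bitsandbytes):
if __name__ == "__main__":
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("facebook/opt-350m")
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    quantized_model = load_and_quantize_model(
        empty_model,
        weights_location="/path/to/opt-350m/weights",  # placeholder path
        bnb_quantization_config=bnb_config,
        device_map="auto",
    )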
| 25 | 1 |
"""simple docstring"""
from math import factorial
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = real
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Tuple = [1] * rank
else:
SCREAMING_SNAKE_CASE__ : int = rank
def __repr__(self ) -> Optional[int]:
"""simple docstring"""
return (
F'''{self.real}+'''
F'''{'+'.join(str(SCREAMING_SNAKE_CASE__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , SCREAMING_SNAKE_CASE__ )
def __add__(self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return Dual(self.real + other , self.duals )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.duals.copy()
SCREAMING_SNAKE_CASE__ : Any = other.duals.copy()
if len(SCREAMING_SNAKE_CASE__ ) > len(SCREAMING_SNAKE_CASE__ ):
o_dual.extend([1] * (len(SCREAMING_SNAKE_CASE__ ) - len(SCREAMING_SNAKE_CASE__ )) )
elif len(SCREAMING_SNAKE_CASE__ ) < len(SCREAMING_SNAKE_CASE__ ):
s_dual.extend([1] * (len(SCREAMING_SNAKE_CASE__ ) - len(SCREAMING_SNAKE_CASE__ )) )
SCREAMING_SNAKE_CASE__ : Tuple = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase : int = __add__
def __sub__(self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
return self + other * -1
def __mul__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase : Any = __mul__
def __truediv__(self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Any = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , SCREAMING_SNAKE_CASE__ )
raise ValueError
def __floordiv__(self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , SCREAMING_SNAKE_CASE__ )
raise ValueError
def __pow__(self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
if n < 0 or isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self
for _ in range(n - 1 ):
x *= self
return x
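# Worked micro-example of why dual numbers track derivatives: for x with real
# part 3 and one dual slot seeded to 1, the product rule in __mul__ above gives
# x * x a first dual component of 3*1 + 1*3 = 6, i.e. d(x^2)/dx evaluated at 3.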
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
if not callable(_snake_case ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(_snake_case ,(float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(_snake_case ,_snake_case ):
raise ValueError("""differentiate() requires an int as input for order""" )
SCREAMING_SNAKE_CASE__ : Any = Dual(_snake_case ,1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = func(_snake_case )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
def lowercase_ ( _snake_case ):
return y**2 * y**4
print(differentiate(f, 9, 2))
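# The demo above computes the second derivative of f(y) = y**6 at y = 9 via
# dual numbers: d2/dy2 (y**6) = 30 * y**4, so the expected output is
# 30 * 9**4 = 196830.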
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case )
SCREAMING_SNAKE_CASE__ : int = len(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(1 ,texta_length + 1 ):
for j in range(1 ,texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
SCREAMING_SNAKE_CASE__ : List[Any] = i
SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
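# A standalone runnable version of the DP above with the two inputs kept
# distinct (the masked signature reuses one placeholder for both parameters),
# plus a quick self-check; the names here are illustrative:
def longest_common_substring_demo(text1: str, text2: str) -> str:
    rows, cols = len(text1), len(text2)
    dp = [[0] * (cols + 1) for _ in range(rows + 1)]
    best_len = best_end = 0
    for i in range(1, rows + 1):
        for j in range(1, cols + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    return text1[best_end - best_len : best_end]

assert longest_common_substring_demo("abcdef", "xabdcdefy") == "cdef"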
| 25 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=18 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=4_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=False , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = size if size is not None else {"""height""": 20, """width""": 20}
SCREAMING_SNAKE_CASE__ : Optional[Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = min_resolution
SCREAMING_SNAKE_CASE__ : int = max_resolution
SCREAMING_SNAKE_CASE__ : Dict = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE__ : Any = do_normalize
SCREAMING_SNAKE_CASE__ : str = image_mean
SCREAMING_SNAKE_CASE__ : str = image_std
SCREAMING_SNAKE_CASE__ : Any = do_reduce_labels
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" ,split="""test""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Image.open(dataset[0]["""file"""] )
SCREAMING_SNAKE_CASE__ : int = Image.open(dataset[1]["""file"""] )
return image, map
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = load_dataset("""hf-internal-testing/fixtures_ade20k""" ,split="""test""" )
SCREAMING_SNAKE_CASE__ : Dict = Image.open(ds[0]["""file"""] )
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(ds[1]["""file"""] )
SCREAMING_SNAKE_CASE__ : List[str] = Image.open(ds[2]["""file"""] )
SCREAMING_SNAKE_CASE__ : Tuple = Image.open(ds[3]["""file"""] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowerCAmelCase_ (a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = BeitImageProcessingTester(self )
@property
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """do_center_crop""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """center_crop""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """do_normalize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """image_mean""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , """image_std""" ) )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=SCREAMING_SNAKE_CASE__ )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
self.assertEqual(image_processor.do_reduce_labels , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
pass
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test not batched input (PIL images)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : Dict = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
# Test batched input (PIL images)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 1_50 )
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : int = image_processing(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_tensors="""pt""" )
self.assertTrue(encoding["""labels"""].min().item() >= 0 )
self.assertTrue(encoding["""labels"""].max().item() <= 2_55 )
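# A small sketch of the label reduction being exercised above (the exact
# semantics are an assumption, stated here rather than taken from the
# processor): background class 0 maps to the ignore index 255 and every
# remaining label shifts down by one. numpy is already imported as np at
# the top of this file.
def reduce_labels_sketch(segmentation_map):
    reduced = segmentation_map.copy()
    reduced[reduced == 0] = 255    # background -> ignore index
    reduced = reduced - 1          # classes 1..150 -> 0..149
    reduced[reduced == 254] = 255  # keep the ignore index pinned at 255
    return reduced

print(reduce_labels_sketch(np.array([0, 1, 150])))  # -> [255   0 149]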
| 25 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ):
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : List[Any] = []
for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
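# A quick standalone check of the 0-1000 box normalization defined above:
# pixel boxes are rescaled relative to page width/height, the coordinate
# convention LayoutLM-style models expect (the function name is illustrative).
def normalize_box_sketch(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

# A word box at (200, 300)-(300, 350) on a 1000x2000 page:
print(normalize_box_sketch([200, 300, 300, 350], width=1000, height=2000))
# -> [200, 150, 300, 175]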
| 25 | 1 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = R"""\w+[.]\d+"""
SCREAMING_SNAKE_CASE__ : Dict = re.findall(_snake_case ,_snake_case )
for pat in pats:
SCREAMING_SNAKE_CASE__ : Dict = key.replace(_snake_case ,"""_""".join(pat.split(""".""" ) ) )
return key
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
SCREAMING_SNAKE_CASE__ : Tuple = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE__ : List[Any] = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
SCREAMING_SNAKE_CASE__ : Any = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
SCREAMING_SNAKE_CASE__ : str = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
SCREAMING_SNAKE_CASE__ : Tuple = pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
SCREAMING_SNAKE_CASE__ : Dict = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
SCREAMING_SNAKE_CASE__ : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
SCREAMING_SNAKE_CASE__ : List[str] = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
SCREAMING_SNAKE_CASE__ : str = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=42 ):
# Step 1: Convert pytorch tensor to numpy
SCREAMING_SNAKE_CASE__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
SCREAMING_SNAKE_CASE__ : Optional[int] = flax_model.init_weights(PRNGKey(_snake_case ) )
SCREAMING_SNAKE_CASE__ : Any = flatten_dict(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
SCREAMING_SNAKE_CASE__ : Dict = rename_key(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = rename_key_and_reshape_tensor(_snake_case ,_snake_case ,_snake_case )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
SCREAMING_SNAKE_CASE__ : List[str] = jnp.asarray(_snake_case )
return unflatten_dict(_snake_case )
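# A standalone illustration of the rename_key regex above (re is already
# imported in this file): PyTorch ModuleList entries such as "layers.0"
# become Flax-style "layers_0".
def rename_key_sketch(key):
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

print(rename_key_sketch("encoder.layers.0.attention.weight"))
# -> encoder.layers_0.attention.weight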
| 25 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Optional[int] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return dft[0]
        # Halve the number of rows each butterfly pass until dft[0] holds the full transform
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft
SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2
return dft[0]
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" )
SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" )
SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : Any = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
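# The class can be sanity-checked against numpy's FFT (a sketch, separate
# from the implementation above): polynomial multiplication is convolution
# of the zero-padded coefficient vectors. numpy is already imported as np.
def multiply_poly_fft(poly_a, poly_b):
    n = len(poly_a) + len(poly_b) - 1
    product = np.fft.ifft(np.fft.fft(poly_a, n) * np.fft.fft(poly_b, n))
    return np.round(product.real, 8)

# (1 + 2x) * (3 + 4x) = 3 + 10x + 8x^2
print(multiply_poly_fft([1, 2], [3, 4]))  # -> [ 3. 10.  8.]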
| 25 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
UpperCAmelCase__ : str = 5_0_0_0_0
UpperCAmelCase__ : List[str] = 5_0_0_0
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = os.path.split(__file__)
UpperCAmelCase__ : Optional[Any] = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def lowercase_ ( _snake_case ,_snake_case ):
for i in range(_snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = dataset[i]
@get_duration
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
for i in range(0 ,len(_snake_case ) ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = dataset[i : i + batch_size]
@get_duration
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
with dataset.formatted_as(type=_snake_case ):
for i in range(_snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = dataset[i]
@get_duration
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ):
with dataset.formatted_as(type=_snake_case ):
for i in range(0 ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = dataset[i : i + batch_size]
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : List[Any] = {"""num examples""": SPEED_TEST_N_EXAMPLES}
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
]
SCREAMING_SNAKE_CASE__ : Optional[int] = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.Features(
{"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
SCREAMING_SNAKE_CASE__ : str = generate_example_dataset(
os.path.join(_snake_case ,"""dataset.arrow""" ) ,_snake_case ,num_examples=_snake_case ,seq_shapes={"""list""": (100,)} ,)
print("""first set of iterations""" )
for func, kwargs in functions:
print(func.__name__ ,str(_snake_case ) )
SCREAMING_SNAKE_CASE__ : str = func(_snake_case ,**_snake_case )
print("""shuffling dataset""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ ,func.__name__ ,str(_snake_case ) )
SCREAMING_SNAKE_CASE__ : Tuple = func(
_snake_case ,**_snake_case )
with open(_snake_case ,"""wb""" ) as f:
f.write(json.dumps(_snake_case ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
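# ``get_duration`` comes from a local ``utils`` module that is not shown in
# this file; a plausible minimal implementation (an assumption, not the real
# helper) is a decorator that returns the wall-clock time of the wrapped call.
import functools
import time

def get_duration_sketch(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # seconds spent in the call
    return wrapper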
| 25 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_snake_case ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_snake_case )
return parser.parse_args()
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : int = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE__ : int = script_fpath.stem
SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case )
# Patch sys.argv
SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
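# Example invocation (assuming this launcher is saved as xla_spawn.py):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased
#
# The launcher imports run_glue as a module, rewrites sys.argv so the training
# script sees its own arguments plus --tpu_num_cores, and spawns one process
# per TPU core through xmp.spawn.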
| 25 | 1 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True)
os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True)
def lowercase_ ( _snake_case ):
if hor == 128:
SCREAMING_SNAKE_CASE__ : str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (32, 128, 256)
SCREAMING_SNAKE_CASE__ : List[Any] = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
SCREAMING_SNAKE_CASE__ : Dict = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
SCREAMING_SNAKE_CASE__ : Tuple = (32, 64, 128, 256)
SCREAMING_SNAKE_CASE__ : Tuple = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
SCREAMING_SNAKE_CASE__ : Dict = torch.load(f'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
SCREAMING_SNAKE_CASE__ : List[str] = model.state_dict()
SCREAMING_SNAKE_CASE__ : str = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 65_536,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = UNetaDModel(**_snake_case )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(model.state_dict().keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE__ : int = state_dict.pop(_snake_case )
hf_value_function.load_state_dict(_snake_case )
torch.save(hf_value_function.state_dict() ,f'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(f'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' ,"""w""" ) as f:
json.dump(_snake_case ,_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Dict = {
"""in_channels""": 14,
"""down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
"""up_block_types""": (),
"""out_block_type""": """ValueFunction""",
"""mid_block_type""": """ValueFunctionMidBlock1D""",
"""block_out_channels""": (32, 64, 128, 256),
"""layers_per_block""": 1,
"""downsample_each_block""": True,
"""sample_size""": 65_536,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""use_timestep_embedding""": True,
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""norm_num_groups""": 8,
"""act_fn""": """mish""",
}
SCREAMING_SNAKE_CASE__ : Tuple = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model
SCREAMING_SNAKE_CASE__ : str = UNetaDModel(**_snake_case )
print(f'''length of state dict: {len(state_dict.keys() )}''' )
print(f'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(state_dict.keys() ,hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict.pop(_snake_case )
hf_value_function.load_state_dict(_snake_case )
torch.save(hf_value_function.state_dict() ,"""hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
with open("""hub/hopper-medium-v2/value_function/config.json""" ,"""w""" ) as f:
json.dump(_snake_case ,_snake_case )
if __name__ == "__main__":
unet(3_2)
# unet(128)
value_function()
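# Both conversions above rely on the two state dicts enumerating parameters
# in the same order, so a plain zip of the key lists is enough to build the
# rename map. A tiny self-contained illustration of that trick:
#
#   src = {"blocks.0.weight": 1, "blocks.0.bias": 2}
#   dst_keys = ["down_blocks.0.weight", "down_blocks.0.bias"]
#   mapping = dict(zip(src.keys(), dst_keys))
#   converted = {mapping[key]: value for key, value in src.items()}
#   # converted == {"down_blocks.0.weight": 1, "down_blocks.0.bias": 2}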
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
return 1 if input_a == input_a else 0
def lowercase_ ( ):
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
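# Equivalently, for inputs restricted to 0/1 the gate is one XOR away
# (xnor_gate_bitwise is an illustrative name, not part of the module above):
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return 1 - (input_1 ^ input_2)

assert all(
    xnor_gate_bitwise(a, b) == (1 if a == b else 0) for a in (0, 1) for b in (0, 1)
)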
| 25 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ : List[str] = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Tuple = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 25 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : Optional[int] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase__ : List[Any] = logging.WARNING
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def lowercase_ ( ):
return __name__.split(""".""" )[0]
def lowercase_ ( ):
return logging.getLogger(_get_library_name() )
def lowercase_ ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( _snake_case = None ):
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( _snake_case ):
_get_library_root_logger().setLevel(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None
def __iter__(self ) -> int:
"""simple docstring"""
return iter(self._iterator )
def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ) -> Dict:
"""simple docstring"""
return self
def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return
UpperCAmelCase__ : str = True
class lowerCAmelCase_ :
"""simple docstring"""
def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase__ : Tuple = _tqdm_cls()
def lowercase_ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : str = False
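# Typical client-side usage of this module (a sketch; the de-obfuscated
# helper names set_verbosity / set_verbosity_info / get_logger follow the
# public ``datasets.logging`` API):
#
#   from datasets import logging as ds_logging
#
#   ds_logging.set_verbosity_info()          # or set_verbosity(ds_logging.INFO)
#   logger = ds_logging.get_logger(__name__)
#   logger.info("visible at INFO verbosity and below")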
| 25 | 1 |
"""simple docstring"""
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
SCREAMING_SNAKE_CASE__ : str = 0
SCREAMING_SNAKE_CASE__ : int = {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
if vertex not in self.adjacency:
SCREAMING_SNAKE_CASE__ : Any = {}
self.num_vertices += 1
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
self.add_vertex(SCREAMING_SNAKE_CASE__ )
self.add_vertex(SCREAMING_SNAKE_CASE__ )
if head == tail:
return
SCREAMING_SNAKE_CASE__ : int = weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = weight
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = edge
edges.remove((tail, head, weight) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : List[str] = list(edges[i] )
edges.sort(key=lambda SCREAMING_SNAKE_CASE__ : e[2] )
for i in range(len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
SCREAMING_SNAKE_CASE__ : Any = edges[i][2] + 1
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = edge
SCREAMING_SNAKE_CASE__ : Any = weight
SCREAMING_SNAKE_CASE__ : List[str] = weight
def __str__(self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip("""\n""" )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = Graph()
if vertices is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
if edges is None:
SCREAMING_SNAKE_CASE__ : List[str] = []
for vertex in vertices:
g.add_vertex(SCREAMING_SNAKE_CASE__ )
for edge in edges:
g.add_edge(*SCREAMING_SNAKE_CASE__ )
return g
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : List[Any] = {}
def __len__(self ) -> Any:
"""simple docstring"""
return len(self.parent )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
if item in self.parent:
return self.find(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = item
SCREAMING_SNAKE_CASE__ : Dict = 0
return item
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
if item not in self.parent:
return self.make_set(SCREAMING_SNAKE_CASE__ )
if item != self.parent[item]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find(self.parent[item] )
return self.parent[item]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.find(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.find(SCREAMING_SNAKE_CASE__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
SCREAMING_SNAKE_CASE__ : str = roota
return roota
if self.rank[roota] < self.rank[roota]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = roota
return roota
return None
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = graph.num_vertices
SCREAMING_SNAKE_CASE__ : List[str] = Graph.UnionFind()
SCREAMING_SNAKE_CASE__ : Tuple = []
while num_components > 1:
SCREAMING_SNAKE_CASE__ : List[str] = {}
for vertex in graph.get_vertices():
SCREAMING_SNAKE_CASE__ : List[str] = -1
SCREAMING_SNAKE_CASE__ : str = graph.get_edges()
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = edge
edges.remove((tail, head, weight) )
for edge in edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = edge
SCREAMING_SNAKE_CASE__ : int = union_find.find(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = union_find.find(SCREAMING_SNAKE_CASE__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ : Dict = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
SCREAMING_SNAKE_CASE__ : List[Any] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = cheap_edge[vertex]
if union_find.find(SCREAMING_SNAKE_CASE__ ) != union_find.find(SCREAMING_SNAKE_CASE__ ):
union_find.union(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
mst_edges.append(cheap_edge[vertex] )
SCREAMING_SNAKE_CASE__ : Tuple = num_components - 1
SCREAMING_SNAKE_CASE__ : int = Graph.build(edges=SCREAMING_SNAKE_CASE__ )
return mst
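# A compact, de-obfuscated sketch of the same Boruvka idea on an explicit
# edge list (helper names here are illustrative): each round, every
# component picks its cheapest outgoing edge, then the picked edges are
# merged with a union-find until one component remains.
def boruvka_mst_sketch(num_vertices, edges):
    parent = list(range(num_vertices))

    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]  # path halving
            v = parent[v]
        return v

    mst, components = [], num_vertices
    while components > 1:
        cheapest = [None] * num_vertices
        for u, v, w in edges:
            ru, rv = find(u), find(v)
            if ru != rv:
                for root in (ru, rv):
                    if cheapest[root] is None or cheapest[root][2] > w:
                        cheapest[root] = (u, v, w)
        for edge in cheapest:
            if edge is not None:
                u, v, w = edge
                ru, rv = find(u), find(v)
                if ru != rv:
                    parent[ru] = rv
                    mst.append(edge)
                    components -= 1
    return mst

print(boruvka_mst_sketch(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))
# -> a 3-edge spanning tree of total weight 6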
| 25 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost
SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
| 25 | 1 |
"""simple docstring"""
import sys
import turtle
def lowercase_ ( _snake_case ,_snake_case ):
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,):
my_pen.up()
my_pen.goto(vertexa[0] ,vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] ,vertexa[1] )
my_pen.goto(vertexa[0] ,vertexa[1] )
my_pen.goto(vertexa[0] ,vertexa[1] )
if depth == 0:
return
triangle(_snake_case ,get_mid(_snake_case ,_snake_case ) ,get_mid(_snake_case ,_snake_case ) ,depth - 1 )
triangle(_snake_case ,get_mid(_snake_case ,_snake_case ) ,get_mid(_snake_case ,_snake_case ) ,depth - 1 )
triangle(_snake_case ,get_mid(_snake_case ,_snake_case ) ,get_mid(_snake_case ,_snake_case ) ,depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
UpperCAmelCase__ : List[Any] = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
UpperCAmelCase__ : Union[str, Any] = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 25 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim :]
# fmt: on
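# Minimal sketch (tensor contents and the helper name are illustrative, not from
# a real checkpoint) of the split performed above: the fused qkv projection has
# shape (3 * dim, dim) and is cut into query / key / value thirds.
def _demo_split_qkv(dim=4):
    import torch

    in_proj_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    query = in_proj_weight[:dim, :]
    key = in_proj_weight[dim : dim * 2, :]
    value = in_proj_weight[-dim:, :]
    return query.shape, key.shape, value.shape  # each is (dim, dim)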
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
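# Hedged illustration (the tiny tensor below is made up): the four helpers above
# fix the channel order of Swin's patch-merging parameters -- the row indexing
# swaps the middle two of the four channel groups and the transpose interleaves
# them, while the `reverse_`-prefixed variants apply the exact inverse permutation.
def _demo_unfold_reorder():
    import torch

    x = torch.arange(8.0)  # stands in for a 1d parameter with in_channel = 8
    return x.reshape(4, 8 // 4)[[0, 2, 1, 3], :].transpose(0, 1).reshape(8)
    # -> tensor([0., 4., 2., 6., 1., 5., 3., 7.])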
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = '''mobilenet_v2'''
def __init__(self , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=2_24 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu6" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.8 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=0.001 , SCREAMING_SNAKE_CASE__=2_55 , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : List[str] = depth_multiplier
SCREAMING_SNAKE_CASE__ : Tuple = depth_divisible_by
SCREAMING_SNAKE_CASE__ : Tuple = min_depth
SCREAMING_SNAKE_CASE__ : List[str] = expand_ratio
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_stride
SCREAMING_SNAKE_CASE__ : List[Any] = first_layer_is_expansion
SCREAMING_SNAKE_CASE__ : int = finegrained_output
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = tf_padding
SCREAMING_SNAKE_CASE__ : Union[str, Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[int] = semantic_loss_ignore_index
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : List[str] = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
| 25 |
"""simple docstring"""
import math
import unittest
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
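# Small illustrative check (mine, not part of the original tests): every prime
# greater than 3 lies next to a multiple of 6, i.e. its remainder mod 6 is 1 or
# 5, which is why the loop above can step by 6 and only probe i and i + 2.
def _demo_six_k_form():
    return [(p, p % 6) for p in (5, 7, 11, 13, 17, 19, 23, 29)]  # remainders: 5 or 1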
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : Optional[Any] = 'src/diffusers'
UpperCAmelCase__ : Any = '.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCAmelCase__ : int = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCAmelCase__ : Optional[int] = spec.loader.load_module()
def lowercase_ ( _snake_case ,_snake_case ):
return line.startswith(_snake_case ) or len(_snake_case ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" ,_snake_case ) is not None
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = object_name.split(""".""" )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
# First let's find the module where our object lives.
SCREAMING_SNAKE_CASE__ : Dict = parts[i]
while i < len(_snake_case ) and not os.path.isfile(os.path.join(_snake_case ,f'''{module}.py''' ) ):
i += 1
if i < len(_snake_case ):
SCREAMING_SNAKE_CASE__ : str = os.path.join(_snake_case ,parts[i] )
if i >= len(_snake_case ):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(_snake_case ,f'''{module}.py''' ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE__ : List[Any] = f.readlines()
# Now let's find the class / func in the code!
SCREAMING_SNAKE_CASE__ : str = """"""
SCREAMING_SNAKE_CASE__ : Dict = 0
for name in parts[i + 1 :]:
while (
line_index < len(_snake_case ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' ,lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_snake_case ):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
SCREAMING_SNAKE_CASE__ : List[Any] = line_index
while line_index < len(_snake_case ) and _should_continue(lines[line_index] ,_snake_case ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = lines[start_index:line_index]
return "".join(_snake_case )
UpperCAmelCase__ : Optional[int] = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCAmelCase__ : Any = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCAmelCase__ : Optional[Any] = re.compile(r'<FILL\s+[^>]*>')
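# Illustrative regex check (the example string is mine): the copy-marker pattern
# above captures the indentation, the referenced object and an optional
# `with old->new` replacement clause.
def _demo_copy_marker():
    return _re_copy_warning.search(
        "    # Copied from diffusers.models.attention.Attention with Attention->Cross"
    ).groups()
    # -> ('    ', 'models.attention.Attention', 'with Attention->Cross')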
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = code.split("""\n""" )
SCREAMING_SNAKE_CASE__ : str = 0
while idx < len(_snake_case ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_snake_case ):
return re.search(R"""^(\s*)\S""" ,lines[idx] ).groups()[0]
return ""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = len(get_indent(_snake_case ) ) > 0
if has_indent:
SCREAMING_SNAKE_CASE__ : Dict = f'''class Bla:\n{code}'''
SCREAMING_SNAKE_CASE__ : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ,preview=_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = black.format_str(_snake_case ,mode=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = style_docstrings_in_code(_snake_case )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def lowercase_ ( _snake_case ,_snake_case=False ):
with open(_snake_case ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = f.readlines()
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : List[str] = 0
    # Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = search.groups()
SCREAMING_SNAKE_CASE__ : Optional[Any] = find_code_in_diffusers(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_indent(_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
SCREAMING_SNAKE_CASE__ : List[str] = theoretical_indent
SCREAMING_SNAKE_CASE__ : int = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
while line_index < len(_snake_case ) and should_continue:
line_index += 1
if line_index >= len(_snake_case ):
break
SCREAMING_SNAKE_CASE__ : List[str] = lines[line_index]
SCREAMING_SNAKE_CASE__ : Tuple = _should_continue(_snake_case ,_snake_case ) and re.search(f'''^{indent}# End copy''' ,_snake_case ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
SCREAMING_SNAKE_CASE__ : Tuple = lines[start_index:line_index]
SCREAMING_SNAKE_CASE__ : Any = """""".join(_snake_case )
# Remove any nested `Copied from` comments to avoid circular copies
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(_snake_case ) is None]
SCREAMING_SNAKE_CASE__ : Tuple = """\n""".join(_snake_case )
# Before comparing, use the `replace_pattern` on the original code.
if len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : str = replace_pattern.replace("""with""" ,"""""" ).split(""",""" )
SCREAMING_SNAKE_CASE__ : List[str] = [_re_replace_pattern.search(_snake_case ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = pattern.groups()
SCREAMING_SNAKE_CASE__ : Dict = re.sub(_snake_case ,_snake_case ,_snake_case )
if option.strip() == "all-casing":
SCREAMING_SNAKE_CASE__ : List[Any] = re.sub(obja.lower() ,obja.lower() ,_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(obja.upper() ,obja.upper() ,_snake_case )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
SCREAMING_SNAKE_CASE__ : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
SCREAMING_SNAKE_CASE__ : Optional[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
SCREAMING_SNAKE_CASE__ : List[str] = lines[:start_index] + [theoretical_code] + lines[line_index:]
SCREAMING_SNAKE_CASE__ : Optional[int] = start_index + 1
if overwrite and len(_snake_case ) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''' )
with open(_snake_case ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
f.writelines(_snake_case )
return diffs
def lowercase_ ( _snake_case = False ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = glob.glob(os.path.join(_snake_case ,"""**/*.py""" ) ,recursive=_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = []
for filename in all_files:
SCREAMING_SNAKE_CASE__ : Tuple = is_copy_consistent(_snake_case ,_snake_case )
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(_snake_case ) > 0:
SCREAMING_SNAKE_CASE__ : List[Any] = """\n""".join(_snake_case )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCAmelCase__ : Dict = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
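# Reference values (standard sequence, stated here only for illustration): the
# three-pointer merge above yields 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 as the first
# ten ugly numbers -- 7 and 11 are skipped because they have prime factors other
# than 2, 3 and 5.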
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCAmelCase__ : List[Any] = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
UpperCAmelCase__ : Optional[int] = {
'camembert-base': 5_1_2,
}
UpperCAmelCase__ : List[Any] = '▁'
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=["<s>NOTUSED", "</s>NOTUSED"] , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
SCREAMING_SNAKE_CASE__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Dict = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the
        # sentencepiece vocabulary (this is the case for <s> and </s>).
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
SCREAMING_SNAKE_CASE__ : Any = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE__ : Any = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE__ : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Tuple = """"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : List[str] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __getstate__(self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Any = None
return state
def __setstate__(self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : int = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
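# Illustrative note (the ids come from the hard-coded table above, the reading is
# mine): sentencepiece ids are shifted by `fairseq_offset` so the four fairseq
# specials keep ids 0-3, and any piece that sentencepiece maps to its own unk id
# (0) is redirected to the tokenizer's unk token instead.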
| 25 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
| 25 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : List[str] = '''mgp-str'''
def __init__(self , SCREAMING_SNAKE_CASE__=[32, 1_28] , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=27 , SCREAMING_SNAKE_CASE__=38 , SCREAMING_SNAKE_CASE__=5_02_57 , SCREAMING_SNAKE_CASE__=3_05_22 , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=4.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=0.02 , **SCREAMING_SNAKE_CASE__ , ) -> List[str]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : int = patch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = max_token_length
SCREAMING_SNAKE_CASE__ : Any = num_character_labels
SCREAMING_SNAKE_CASE__ : Tuple = num_bpe_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_wordpiece_labels
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = mlp_ratio
SCREAMING_SNAKE_CASE__ : Tuple = distilled
SCREAMING_SNAKE_CASE__ : List[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = drop_rate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = qkv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attn_drop_rate
SCREAMING_SNAKE_CASE__ : Dict = drop_path_rate
SCREAMING_SNAKE_CASE__ : Any = output_aa_attentions
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
| 25 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase_ ( _snake_case ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Any = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" )
SCREAMING_SNAKE_CASE__ : Tuple = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
return upgrade
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig()
SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case )
hf_model.load_state_dict(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict()
SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case )
SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 1 |
"""simple docstring"""
import argparse
import hashlib  # hashlib is only used for the self-test below
import struct
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = data
SCREAMING_SNAKE_CASE__ : List[Any] = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0]
@staticmethod
def __magic_name__ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64)
SCREAMING_SNAKE_CASE__ : Dict = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def __magic_name__ (self ) -> int:
"""simple docstring"""
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = list(struct.unpack(""">16L""" , SCREAMING_SNAKE_CASE__ ) ) + [0] * 64
for i in range(16 , 80 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.padding()
SCREAMING_SNAKE_CASE__ : Any = self.split_blocks()
for block in self.blocks:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.expand_block(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
SCREAMING_SNAKE_CASE__ : List[str] = (b & c) | ((~b) & d)
SCREAMING_SNAKE_CASE__ : Dict = 0X5_a_8_2_7_9_9_9
elif 20 <= i < 40:
SCREAMING_SNAKE_CASE__ : Optional[Any] = b ^ c ^ d
SCREAMING_SNAKE_CASE__ : str = 0X6_e_d_9_e_b_a_1
elif 40 <= i < 60:
SCREAMING_SNAKE_CASE__ : Any = (b & c) | (b & d) | (c & d)
SCREAMING_SNAKE_CASE__ : List[Any] = 0X8_f_1_b_b_c_d_c
elif 60 <= i < 80:
SCREAMING_SNAKE_CASE__ : Optional[int] = b ^ c ^ d
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0Xc_a_6_2_c_1_d_6
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.rotate(SCREAMING_SNAKE_CASE__ , 5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f,
a,
self.rotate(SCREAMING_SNAKE_CASE__ , 30 ),
c,
d,
)
SCREAMING_SNAKE_CASE__ : List[Any] = (
self.h[0] + a & 0Xf_f_f_f_f_f_f_f,
self.h[1] + b & 0Xf_f_f_f_f_f_f_f,
self.h[2] + c & 0Xf_f_f_f_f_f_f_f,
self.h[3] + d & 0Xf_f_f_f_f_f_f_f,
self.h[4] + e & 0Xf_f_f_f_f_f_f_f,
)
return ("{:08x}" * 5).format(*self.h )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Any = b"""Test String"""
assert SHAaHash(_snake_case ).final_hash() == hashlib.shaa(_snake_case ).hexdigest() # noqa: S324
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser(description="""Process some strings or files""" )
parser.add_argument(
"""--string""" ,dest="""input_string""" ,default="""Hello World!! Welcome to Cryptography""" ,help="""Hash the string""" ,)
parser.add_argument("""--file""" ,dest="""input_file""" ,help="""Hash contents of a file""" )
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[int] = args.input_string
    # In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"""rb""" ) as f:
SCREAMING_SNAKE_CASE__ : List[str] = f.read()
else:
SCREAMING_SNAKE_CASE__ : Dict = bytes(_snake_case ,"""utf-8""" )
print(SHAaHash(_snake_case ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 25 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 25 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
UpperCAmelCase__ : str = 'http://www.mocksite.com/file1.txt'
UpperCAmelCase__ : List[str] = '"text": ["foo", "foo"]'
UpperCAmelCase__ : List[Any] = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class lowerCAmelCase_ :
"""simple docstring"""
__UpperCamelCase : int = 200
__UpperCamelCase : int = {'''Content-Length''': '''100'''}
__UpperCamelCase : Tuple = {}
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
return [bytes(SCREAMING_SNAKE_CASE__ , """utf-8""" )]
def lowercase_ ( *_snake_case ,**_snake_case ):
return MockResponse()
@pytest.mark.parametrize("""urls_type""" ,[str, list, dict] )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
import requests
monkeypatch.setattr(_snake_case ,"""request""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : str = URL
if issubclass(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = url
elif issubclass(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [url]
elif issubclass(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = {"""train""": url}
SCREAMING_SNAKE_CASE__ : Tuple = """dummy"""
SCREAMING_SNAKE_CASE__ : Dict = """downloads"""
SCREAMING_SNAKE_CASE__ : List[Any] = tmp_path
SCREAMING_SNAKE_CASE__ : Tuple = DownloadConfig(
cache_dir=os.path.join(_snake_case ,_snake_case ) ,use_etag=_snake_case ,)
SCREAMING_SNAKE_CASE__ : Any = DownloadManager(dataset_name=_snake_case ,download_config=_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = dl_manager.download(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = [downloaded_paths]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [urls]
elif isinstance(_snake_case ,_snake_case ):
assert "train" in downloaded_paths.keys()
SCREAMING_SNAKE_CASE__ : Dict = downloaded_paths.values()
SCREAMING_SNAKE_CASE__ : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_snake_case ,_snake_case ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
SCREAMING_SNAKE_CASE__ : Dict = Path(_snake_case )
SCREAMING_SNAKE_CASE__ : int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
SCREAMING_SNAKE_CASE__ : List[str] = downloaded_path.read_text()
assert content == CONTENT
SCREAMING_SNAKE_CASE__ : Optional[int] = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
SCREAMING_SNAKE_CASE__ : Any = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" ,[str, list, dict] )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = str(_snake_case )
if issubclass(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[int] = filename
elif issubclass(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [filename]
elif issubclass(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {"""train""": filename}
SCREAMING_SNAKE_CASE__ : List[Any] = """dummy"""
SCREAMING_SNAKE_CASE__ : List[str] = xz_file.parent
SCREAMING_SNAKE_CASE__ : Dict = """extracted"""
SCREAMING_SNAKE_CASE__ : Optional[int] = DownloadConfig(
cache_dir=_snake_case ,use_etag=_snake_case ,)
SCREAMING_SNAKE_CASE__ : Tuple = DownloadManager(dataset_name=_snake_case ,download_config=_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = dl_manager.extract(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [extracted_paths]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [paths]
elif isinstance(_snake_case ,_snake_case ):
assert "train" in extracted_paths.keys()
SCREAMING_SNAKE_CASE__ : str = extracted_paths.values()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_snake_case ,_snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
SCREAMING_SNAKE_CASE__ : Tuple = Path(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_snake_case ,etag=_snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = extracted_path.read_text()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def lowercase_ ( _snake_case ,_snake_case ):
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(_snake_case ,start=1 ):
SCREAMING_SNAKE_CASE__ : List[str] = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" ,["""tar_jsonl_path""", """zip_jsonl_path"""] )
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : str = request.getfixturevalue(_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_snake_case ) ,start=1 ):
_test_jsonl(_snake_case ,_snake_case )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" ,["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Dict = request.getfixturevalue(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_snake_case ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_snake_case ) ,start=1 ):
_test_jsonl(_snake_case ,_snake_case )
assert num_tar == 1
assert num_jsonl == 2
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_snake_case ) ,start=1 ):
assert os.path.basename(_snake_case ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 25 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 25 | 1 |
"""simple docstring"""
from math import pi, sqrt
def lowercase_ ( _snake_case ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_snake_case ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
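# Worked example of the recursion above (a sketch, values rounded):
#   gamma(3.5) = 2.5 * gamma(2.5) = 2.5 * 1.5 * gamma(1.5)
#              = 2.5 * 1.5 * 0.5 * gamma(0.5) = 1.875 * sqrt(pi) ≈ 3.3234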
def lowercase_ ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase__ : Dict = 1.0
while num:
UpperCAmelCase__ : List[Any] = float(input('Gamma of: '))
print(f"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 25 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel # noqa: the protobuf module is `saved_model_pb2` in TensorFlow proper
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = SavedModel()
SCREAMING_SNAKE_CASE__ : Dict = []
with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_snake_case )] )
with open(_snake_case ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_snake_case )
if strict and len(_snake_case ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + """\n""".join(incompatible_ops ) )
elif len(_snake_case ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_snake_case ,sep="""\n""" )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
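# Example invocation (the script name and path are assumptions; adapt to your checkout):
#   python check_tf_ops.py --saved_model_path ./saved_model/model.pb --opset 12 --strict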
| 25 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
UpperCAmelCase__ : Any = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {'vocab_file': 'vocab.txt'}
UpperCAmelCase__ : List[Any] = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
UpperCAmelCase__ : Optional[Any] = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
UpperCAmelCase__ : str = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[Any] = ConvBertTokenizer
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="[UNK]" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[PAD]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Any:
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , SCREAMING_SNAKE_CASE__ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop("""type""" ) )
SCREAMING_SNAKE_CASE__ : Any = do_lower_case
SCREAMING_SNAKE_CASE__ : Union[str, Any] = strip_accents
SCREAMING_SNAKE_CASE__ : Any = tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : Any = normalizer_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_lower_case
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
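    # Layout produced by the two helpers above for a sequence pair (a sketch):
    #   tokens:   [CLS] A1 ... An [SEP] B1 ... Bm [SEP]
    #   type ids:   0   0  ...  0   0    1  ...  1   1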
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
| 25 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
UpperCAmelCase__ : List[Any] = logging.getLogger()
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join(_snake_case )
Path(_snake_case ).open("""w""" ).writelines(_snake_case )
UpperCAmelCase__ : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
UpperCAmelCase__ : Optional[int] = 'sshleifer/bart-tiny-random'
UpperCAmelCase__ : Dict = 'sshleifer/tiny-mbart'
UpperCAmelCase__ : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : List[Any] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : str = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = F'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
run_generate()
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
# os.remove(Path(output_file_name))
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
self.run_eval_tester(SCREAMING_SNAKE_CASE__ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
SCREAMING_SNAKE_CASE__ : int = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
SCREAMING_SNAKE_CASE__ : Any = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
SCREAMING_SNAKE_CASE__ : List[str] = Path(self.get_auto_remove_tmp_dir() )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """scores.json""" )
SCREAMING_SNAKE_CASE__ : Tuple = str(tmp_dir / """val.target""" )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""en"""] )
_dump_articles(SCREAMING_SNAKE_CASE__ , text["""de"""] )
SCREAMING_SNAKE_CASE__ : str = """translation_en_to_de""" if model == T5_TINY else """summarization"""
SCREAMING_SNAKE_CASE__ : List[Any] = F'''
run_eval_search.py
{model}
{str(SCREAMING_SNAKE_CASE__ )}
{str(SCREAMING_SNAKE_CASE__ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
with CaptureStdout() as cs:
run_search()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""]
SCREAMING_SNAKE_CASE__ : Any = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(SCREAMING_SNAKE_CASE__ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(SCREAMING_SNAKE_CASE__ ).exists()
os.remove(Path(SCREAMING_SNAKE_CASE__ ) )
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=0.6 , SCREAMING_SNAKE_CASE__=None , ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Dict = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = patch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : List[str] = use_labels
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mask_ratio
SCREAMING_SNAKE_CASE__ : str = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
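        # Worked example with the defaults above: (30 // 2) ** 2 = 225 patches,
        # so seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.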
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFViTMAEModel(config=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
# expected sequence length = num_patches
SCREAMING_SNAKE_CASE__ : str = (self.image_size // self.patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Any = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : List[Any] = 1
SCREAMING_SNAKE_CASE__ : Any = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ , training=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
__UpperCamelCase : List[str] = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {}
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : List[Any] = False
__UpperCamelCase : List[str] = False
__UpperCamelCase : List[str] = False
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = TFViTMAEModelTester(self )
SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def __magic_name__ (self ) -> int:
"""simple docstring"""
pass
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple = model_class(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.layers.Layer ) )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = outputs_dict[0].numpy()
SCREAMING_SNAKE_CASE__ : List[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Dict = v.numpy()
else:
SCREAMING_SNAKE_CASE__ : int = np.array(SCREAMING_SNAKE_CASE__ )
return inputs_np_dict
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prepare_numpy_arrays(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = model(**SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
self.assert_outputs_same(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.constant(SCREAMING_SNAKE_CASE__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
SCREAMING_SNAKE_CASE__ : List[Any] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ),)
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE__ , """_keras_serializable""" , SCREAMING_SNAKE_CASE__ )
}
SCREAMING_SNAKE_CASE__ : str = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor(SCREAMING_SNAKE_CASE__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = main_layer_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
SCREAMING_SNAKE_CASE__ : List[Any] = tf.keras.Model(SCREAMING_SNAKE_CASE__ , outputs=main_layer(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : List[str] = model(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , """keras_model.h5""" )
model.save(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE__ , tf.keras.Model )
SCREAMING_SNAKE_CASE__ : str = model(SCREAMING_SNAKE_CASE__ )
self.assert_outputs_same(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> Any:
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
if model_class.__name__ == "TFViTMAEModel":
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.last_hidden_state.numpy()
SCREAMING_SNAKE_CASE__ : List[Any] = 0
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = outputs.logits.numpy()
SCREAMING_SNAKE_CASE__ : List[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE__ , saved_model=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
if model_class.__name__ == "TFViTMAEModel":
SCREAMING_SNAKE_CASE__ : Dict = after_outputs["""last_hidden_state"""].numpy()
SCREAMING_SNAKE_CASE__ : Any = 0
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = after_outputs["""logits"""].numpy()
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Dict = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1E-5 )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Dict = model_class(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
SCREAMING_SNAKE_CASE__ : Any = model_class.from_config(model.config )
SCREAMING_SNAKE_CASE__ : str = new_model(SCREAMING_SNAKE_CASE__ ) # Build model
new_model.set_weights(model.get_weights() )
SCREAMING_SNAKE_CASE__ : Dict = new_model(SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
self.assert_outputs_same(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def __magic_name__ (self ) -> int:
"""simple docstring"""
pass
@slow
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ (self ) -> Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
SCREAMING_SNAKE_CASE__ : Optional[int] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
SCREAMING_SNAKE_CASE__ : int = self.default_image_processor
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTMAEConfig()
SCREAMING_SNAKE_CASE__ : Dict = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE__ : int = np.random.uniform(size=(1, num_patches) )
# forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(**SCREAMING_SNAKE_CASE__ , noise=SCREAMING_SNAKE_CASE__ )
# verify the logits
SCREAMING_SNAKE_CASE__ : Any = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 )
| 25 |
"""simple docstring"""
UpperCAmelCase__ : Any = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase__ : Any = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase__ : Optional[int] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 25 | 1 |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
def update_area_of_max_square(_snake_case ,_snake_case ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
SCREAMING_SNAKE_CASE__ : Optional[int] = update_area_of_max_square(_snake_case ,col + 1 )
SCREAMING_SNAKE_CASE__ : List[Any] = update_area_of_max_square(row + 1 ,col + 1 )
SCREAMING_SNAKE_CASE__ : int = update_area_of_max_square(row + 1 ,_snake_case )
if mat[row][col]:
SCREAMING_SNAKE_CASE__ : Dict = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE__ : List[Any] = max(largest_square_area[0] ,_snake_case )
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE__ : int = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
def update_area_of_max_square_using_dp_array(
_snake_case ,_snake_case ,_snake_case ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
SCREAMING_SNAKE_CASE__ : List[str] = update_area_of_max_square_using_dp_array(_snake_case ,col + 1 ,_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = update_area_of_max_square_using_dp_array(row + 1 ,_snake_case ,_snake_case )
if mat[row][col]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 + min([right, diagonal, down] )
SCREAMING_SNAKE_CASE__ : Any = max(largest_square_area[0] ,_snake_case )
SCREAMING_SNAKE_CASE__ : int = sub_problem_sol
return sub_problem_sol
else:
return 0
SCREAMING_SNAKE_CASE__ : int = [0]
SCREAMING_SNAKE_CASE__ : Any = [[-1] * cols for _ in range(_snake_case )]
update_area_of_max_square_using_dp_array(0 ,0 ,_snake_case )
return largest_square_area[0]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[0] * (cols + 1) for _ in range(rows + 1 )]
SCREAMING_SNAKE_CASE__ : str = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
SCREAMING_SNAKE_CASE__ : Tuple = dp_array[row][col + 1]
SCREAMING_SNAKE_CASE__ : Tuple = dp_array[row + 1][col + 1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dp_array[row + 1][col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE__ : str = 1 + min(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = max(dp_array[row][col] ,_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
return largest_square_area
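# Trace of the bottom-up recurrence above on mat = [[1, 1], [1, 1]] (a sketch):
#   dp[1][1] = dp[1][0] = dp[0][1] = 1, then
#   dp[0][0] = 1 + min(dp[0][1], dp[1][1], dp[1][0]) = 2  -> largest side 2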
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = [0] * (cols + 1)
SCREAMING_SNAKE_CASE__ : List[str] = [0] * (cols + 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
SCREAMING_SNAKE_CASE__ : Dict = current_row[col + 1]
SCREAMING_SNAKE_CASE__ : str = next_row[col + 1]
SCREAMING_SNAKE_CASE__ : Any = next_row[col]
if mat[row][col] == 1:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1 + min(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : int = max(current_row[col] ,_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
    if _snake_case < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(_snake_case ,float ):
raise TypeError("""Input value must be a 'int' type""" )
return bin(_snake_case ).count("""1""" )
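# Example: bin(25) == "0b11001", which contains three "1" characters, so the result is 3.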
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
UpperCAmelCase__ : Union[str, Any] = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
for attribute in key.split(""".""" ):
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : int = getattr(_snake_case ,_snake_case ).shape
else:
SCREAMING_SNAKE_CASE__ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : Dict = value
else:
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = []
SCREAMING_SNAKE_CASE__ : List[str] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : str = hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_snake_case ,_snake_case ,_snake_case ,_snake_case ,hf_model.config.feat_extract_norm == """group""" ,)
SCREAMING_SNAKE_CASE__ : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
SCREAMING_SNAKE_CASE__ : int = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : Dict = name.split(_snake_case )[0].split(""".""" )[-2]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mapped_key.replace("""*""" ,_snake_case )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : Dict = """weight_g"""
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : int = """weight_v"""
elif "bias" in name and "relative_attention_bias" not in name:
SCREAMING_SNAKE_CASE__ : str = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : Any = """weight"""
else:
SCREAMING_SNAKE_CASE__ : List[str] = None
set_recursively(_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
continue
if not is_used:
unused_weights.append(_snake_case )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = full_name.split("""conv_layers.""" )[-1]
SCREAMING_SNAKE_CASE__ : List[str] = name.split(""".""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
SCREAMING_SNAKE_CASE__ : Optional[int] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(_snake_case )
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ):
# load the pre-trained checkpoints
SCREAMING_SNAKE_CASE__ : Any = torch.load(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = WavLMConfigOrig(checkpoint["""cfg"""] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = WavLMOrig(_snake_case )
model.load_state_dict(checkpoint["""model"""] )
model.eval()
if config_path is not None:
SCREAMING_SNAKE_CASE__ : str = WavLMConfig.from_pretrained(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : Any = WavLMConfig()
SCREAMING_SNAKE_CASE__ : Tuple = WavLMModel(_snake_case )
recursively_load_weights(_snake_case ,_snake_case )
hf_wavlm.save_pretrained(_snake_case )
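# Example invocation (the script name and paths are assumptions):
#   python convert_wavlm_checkpoint.py --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base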
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 25 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a__ )
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
requires_backends(self , """vision""" )
self.check_model_type(SCREAMING_SNAKE_CASE__ )
def __call__(self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , **SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
return {}, {}, {}
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = load_image(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = image.size
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors=self.framework )
return model_inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.model(**SCREAMING_SNAKE_CASE__ )
return model_outputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs.predicted_depth
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = prediction.squeeze().cpu().numpy()
SCREAMING_SNAKE_CASE__ : Any = (output * 2_55 / np.max(SCREAMING_SNAKE_CASE__ )).astype("""uint8""" )
SCREAMING_SNAKE_CASE__ : List[str] = Image.fromarray(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : Any = predicted_depth
SCREAMING_SNAKE_CASE__ : Dict = depth
return output_dict
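# Minimal usage sketch of this pipeline (the checkpoint name is an assumption):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("photo.jpg")  # {"predicted_depth": tensor, "depth": PIL.Image}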
| 25 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : List[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Any = XGLMTokenizer
__UpperCamelCase : List[str] = XGLMTokenizerFast
__UpperCamelCase : Optional[Any] = True
__UpperCamelCase : str = True
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ : Optional[Any] = XGLMTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """<pad>"""
SCREAMING_SNAKE_CASE__ : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_08 )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_08 )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = XGLMTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , f.name )
SCREAMING_SNAKE_CASE__ : Any = XGLMTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = pickle.dumps(SCREAMING_SNAKE_CASE__ )
pickle.loads(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Any = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[int] = """I was born in 92000, and this is falsé."""
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def __magic_name__ (self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """Hello World!"""
SCREAMING_SNAKE_CASE__ : Any = [2, 3_12_27, 44_47, 35]
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
        # fmt: off
        SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""input_ids""": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name="""facebook/xglm-564M""" , padding=SCREAMING_SNAKE_CASE__ , )
| 25 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase_ (a__ , a__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = IFPipeline
__UpperCamelCase : Dict = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._get_dummy_components()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
self._test_save_load_local()
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
SCREAMING_SNAKE_CASE__ : List[str] = None
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
SCREAMING_SNAKE_CASE__ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
SCREAMING_SNAKE_CASE__ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
SCREAMING_SNAKE_CASE__ : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : int = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE__ , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# pipeline 2
_start_torch_memory_measurement()
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE__ , negative_prompt_embeds=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , original_image=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (2_56, 2_56, 3)
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
SCREAMING_SNAKE_CASE__ : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowercase_ ( ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
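# Sketch of the pattern each stage test above follows (names illustrative,
# memory budgets per the asserts above):
#
#     _start_torch_memory_measurement()       # reset CUDA peak-memory counters
#     output = pipe(..., num_inference_steps=2, output_type="np")
#     assert torch.cuda.max_memory_allocated() < budget_bytes
#     assert_mean_pixel_difference(output.images[0], expected_image)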
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained("""google/mt5-small""" )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
SCREAMING_SNAKE_CASE__ : List[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ).loss
SCREAMING_SNAKE_CASE__ : List[Any] = -tf.math.reduce_mean(SCREAMING_SNAKE_CASE__ ).numpy()
SCREAMING_SNAKE_CASE__ : List[str] = -21.228168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
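        # `loss` above is the per-token cross-entropy from the TF seq2seq head;
        # negating its mean gives a log-likelihood score that must match the
        # reference value to within 2e-4.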
| 25 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = torch.nn.Linear(10 , 10 )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 )
SCREAMING_SNAKE_CASE__ : int = Accelerator()
SCREAMING_SNAKE_CASE__ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
try:
pickle.loads(pickle.dumps(SCREAMING_SNAKE_CASE__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
| 25 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase__ : Dict = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase__ : List[Any] = {
'facebook/nllb-large-en-ro': 1_0_2_4,
'facebook/nllb-200-distilled-600M': 1_0_2_4,
}
# fmt: off
UpperCAmelCase__ : Any = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
# fmt: on
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Tuple = VOCAB_FILES_NAMES
__UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : str = ['''input_ids''', '''attention_mask''']
__UpperCamelCase : Tuple = NllbTokenizer
__UpperCamelCase : List[int] = []
__UpperCamelCase : List[int] = []
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
SCREAMING_SNAKE_CASE__ : int = legacy_behaviour
super().__init__(
vocab_file=SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , legacy_behaviour=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : str = vocab_file
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False if not self.vocab_file else True
SCREAMING_SNAKE_CASE__ : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
SCREAMING_SNAKE_CASE__ : Dict = {
lang_code: self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
SCREAMING_SNAKE_CASE__ : Any = src_lang if src_lang is not None else """eng_Latn"""
SCREAMING_SNAKE_CASE__ : Dict = self.convert_tokens_to_ids(self._src_lang )
SCREAMING_SNAKE_CASE__ : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
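    # With the default (non-legacy) special tokens this yields
    # [src_lang_code] + token_ids (+ pair token_ids) + [eos]; in legacy mode
    # the language code follows eos instead (see set_src_lang_special_tokens).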
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = src_lang
SCREAMING_SNAKE_CASE__ : Dict = self(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = tgt_lang_id
return inputs
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "eng_Latn" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "fra_Latn" , **SCREAMING_SNAKE_CASE__ , ) -> BatchEncoding:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = src_lang
SCREAMING_SNAKE_CASE__ : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ (self ) -> int:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : List[str] = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : List[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cur_lang_code]
SCREAMING_SNAKE_CASE__ : int = [self.eos_token_id]
SCREAMING_SNAKE_CASE__ : str = self.convert_ids_to_tokens(self.prefix_tokens )
SCREAMING_SNAKE_CASE__ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens )
SCREAMING_SNAKE_CASE__ : List[str] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
SCREAMING_SNAKE_CASE__ : Any = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
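# Usage sketch for this tokenizer (checkpoint per the maps above; treat the
# exact calls as illustrative):
#
#     from transformers import AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tok("Hello world", return_tensors="pt")
#     # input_ids start with the eng_Latn language code (or carry it after eos
#     # when legacy_behaviour=True), matching set_src_lang_special_tokens above.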
| 25 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,):
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ : int = []
# custom device map
if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" )
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization. '''
            """We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers(
_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map(
_snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case )
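# A custom `device_map` accepted above is a plain dict from module name (as in
# `model.named_modules()`) to a device; the names here are illustrative:
#
#     device_map = {"transformer.h.0": 0, "transformer.h.1": "cpu", "lm_head": "disk"}
#
# Entries mapped to "cpu" or "disk" are collected into `modules_on_cpu` and,
# for 4-bit loading, appended to `skip_modules` so they are left unquantized.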
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case ,_snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes
SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ : int = get_balanced_memory(
_snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory
SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case )
if isinstance(_snake_case ,_snake_case ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
if not has_been_replaced:
        logger.warning(
            """You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
            """ This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers."""
            """ Please double check your model architecture, or submit an issue on github if you think this is"""
            """ a bug.""" )
return model
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,):
SCREAMING_SNAKE_CASE__ : Tuple = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(_snake_case )
if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ : List[str] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
SCREAMING_SNAKE_CASE__ : str = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _snake_case ):
# Create a copy of the model
with init_empty_weights():
        SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] )
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if hasattr(_snake_case ,"""base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def lowercase_ ( _snake_case ):
for m in model.modules():
if isinstance(_snake_case ,bnb.nn.Linearabit ):
return True
return False
def lowercase_ ( _snake_case ):
return next(parameter.parameters() ).device
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case )
SCREAMING_SNAKE_CASE__ : str = param_name
SCREAMING_SNAKE_CASE__ : Dict = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ : List[Any] = False
offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case )
if hasattr(module._parameters[tensor_name] ,"""SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,)
else:
offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case )
offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case )
set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
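# End-to-end usage sketch of the public entry point this module backs (names
# per the `accelerate` docs; `MyModel` and the paths are hypothetical):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = MyModel(config)
#     model = load_and_quantize_model(
#         empty_model, weights_location="path/to/checkpoint",
#         bnb_quantization_config=bnb_config, device_map="auto",
#     )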
| 25 | 1 |
"""simple docstring"""
def lowercase_ ( _snake_case = 50 ):
SCREAMING_SNAKE_CASE__ : Tuple = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f"""{solution() = }""")
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
if not (isinstance(_snake_case ,_snake_case ) and isinstance(_snake_case ,_snake_case )):
raise ValueError("""longest_common_substring() takes two strings for inputs""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(_snake_case )
SCREAMING_SNAKE_CASE__ : int = len(_snake_case )
SCREAMING_SNAKE_CASE__ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(1 ,texta_length + 1 ):
for j in range(1 ,texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
SCREAMING_SNAKE_CASE__ : int = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
SCREAMING_SNAKE_CASE__ : List[Any] = i
SCREAMING_SNAKE_CASE__ : List[str] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
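    # Worked example of the DP above: for "abcdef" vs "zabcy" the table grows
    # along the shared run "a", "ab", "abc" to length 3 ending at index 3 of
    # the first string, so the longest common substring returned is "abc".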
| 25 | 1 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCAmelCase__ : Union[str, Any] = logging.getLogger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = None ,_snake_case = False ,):
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
SCREAMING_SNAKE_CASE__ : int = []
# custom device map
if isinstance(_snake_case ,_snake_case ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ : int = get_keys_to_not_convert(_snake_case )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Dict = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(_snake_case )
# compatibility with peft
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Any = load_in_abit
SCREAMING_SNAKE_CASE__ : Tuple = get_parameter_device(_snake_case )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
SCREAMING_SNAKE_CASE__ : int = replace_with_bnb_layers(_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ : str = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ : Tuple = name.replace(""".weight""" ,"""""" ).replace(""".bias""" ,"""""" )
SCREAMING_SNAKE_CASE__ : Dict = getattr(_snake_case ,_snake_case ,_snake_case )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(_snake_case ):
param.to(_snake_case )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Dict = replace_with_bnb_layers(
_snake_case ,_snake_case ,modules_to_not_convert=_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_quantized_model_device_map(
_snake_case ,_snake_case ,_snake_case ,max_memory=_snake_case ,no_split_module_classes=_snake_case ,)
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ : Tuple = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
_snake_case ,_snake_case ,_snake_case ,dtype=bnb_quantization_config.torch_dtype ,offload_folder=_snake_case ,offload_state_dict=_snake_case ,keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules ,offload_abit_bnb=load_in_abit and offload ,)
return dispatch_model(_snake_case ,device_map=_snake_case ,offload_dir=_snake_case )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case=None ):
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ : int = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(_snake_case ,_snake_case ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = special_dtypes
SCREAMING_SNAKE_CASE__ : Optional[Any] = no_split_module_classes
SCREAMING_SNAKE_CASE__ : int = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ : int = get_balanced_memory(
_snake_case ,low_zero=(device_map == """balanced_low_0""") ,max_memory=_snake_case ,**_snake_case ,)
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_memory
SCREAMING_SNAKE_CASE__ : str = infer_auto_device_map(_snake_case ,**_snake_case )
if isinstance(_snake_case ,_snake_case ):
# check if don't have any quantized module on the cpu
SCREAMING_SNAKE_CASE__ : Tuple = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ):
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def lowercase_ ( _snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,):
SCREAMING_SNAKE_CASE__ : Tuple = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ : Any = []
current_key_name.append(_snake_case )
if isinstance(_snake_case ,nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ : Tuple = """.""".join(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ : List[str] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Tuple = bnb.nn.LinearabitLt(
module.in_features ,module.out_features ,module.bias is not None ,has_fpaa_weights=_snake_case ,threshold=bnb_quantization_config.llm_inta_threshold ,)
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ : Dict = bnb.nn.Linearabit(
module.in_features ,module.out_features ,module.bias is not None ,bnb_quantization_config.bnb_abit_compute_dtype ,compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant ,quant_type=bnb_quantization_config.bnb_abit_quant_type ,)
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
SCREAMING_SNAKE_CASE__ : str = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = module.bias.data
bnb_module.requires_grad_(_snake_case )
setattr(_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = _replace_with_bnb_layers(
_snake_case ,_snake_case ,_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowercase_ ( _snake_case ):
# Create a copy of the model
with init_empty_weights():
SCREAMING_SNAKE_CASE__ : Any = deepcopy(_snake_case ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
SCREAMING_SNAKE_CASE__ : Tuple = find_tied_parameters(_snake_case )
# For compatibility with Accelerate < 0.18
if isinstance(_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ : List[str] = sum(_snake_case ,[] )
SCREAMING_SNAKE_CASE__ : Dict = len(_snake_case ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ : Optional[int] = False
if hasattr(_snake_case ,"""base_model_prefix""" ):
SCREAMING_SNAKE_CASE__ : Dict = not hasattr(_snake_case ,model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ : List[str] = set(_snake_case ) - set(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = list(set(_snake_case ) ) + list(_snake_case )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ : Tuple = [""".weight""", """.bias"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = name.replace(_snake_case ,"""""" )
filtered_module_names.append(_snake_case )
return filtered_module_names
def lowercase_ ( _snake_case ):
for m in model.modules():
if isinstance(_snake_case ,bnb.nn.Linearabit ):
return True
return False
def lowercase_ ( _snake_case ):
return next(parameter.parameters() ).device
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(_snake_case ,_snake_case ,0 ,dtype=_snake_case ,value=_snake_case )
SCREAMING_SNAKE_CASE__ : str = param_name
SCREAMING_SNAKE_CASE__ : Dict = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ : Any = tensor_name.split(""".""" )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_snake_case ,_snake_case )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_module
SCREAMING_SNAKE_CASE__ : List[Any] = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ : List[Any] = False
offload_weight(module._parameters[tensor_name] ,_snake_case ,_snake_case ,index=_snake_case )
if hasattr(module._parameters[tensor_name] ,"""SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case ,)
else:
offload_weight(_snake_case ,_snake_case ,_snake_case ,index=_snake_case )
offload_weight(_snake_case ,param_name.replace("""weight""" ,"""SCB""" ) ,_snake_case ,index=_snake_case )
set_module_tensor_to_device(_snake_case ,_snake_case ,"""meta""" ,dtype=_snake_case ,value=torch.empty(*param.size() ) )
| 25 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = None ):
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else """"""
# apply OCR
SCREAMING_SNAKE_CASE__ : List[Any] = to_pil_image(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = pil_image.size
SCREAMING_SNAKE_CASE__ : Tuple = pytesseract.image_to_data(_snake_case ,lang=_snake_case ,output_type="""dict""" ,config=_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [idx for idx, word in enumerate(_snake_case ) if not word.strip()]
SCREAMING_SNAKE_CASE__ : Dict = [word for idx, word in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : List[str] = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : int = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE__ : Tuple = [coord for idx, coord in enumerate(_snake_case ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE__ : List[Any] = []
for x, y, w, h in zip(_snake_case ,_snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(_snake_case )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE__ : List[str] = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_snake_case ,_snake_case ,_snake_case ) )
assert len(_snake_case ) == len(_snake_case ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
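# Example for normalize_box(box, width, height): a box spanning (10, 5)-(60, 30)
# in a 200x100 image maps onto the 0-1000 grid LayoutLM-style models expect:
#
#     normalize_box([10, 5, 60, 30], 200, 100) == [50, 50, 300, 300]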
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = ['''pixel_values''']
def __init__(self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "" , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else {"""height""": 2_24, """width""": 2_24}
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize
SCREAMING_SNAKE_CASE__ : Any = size
SCREAMING_SNAKE_CASE__ : List[Any] = resample
SCREAMING_SNAKE_CASE__ : Dict = apply_ocr
SCREAMING_SNAKE_CASE__ : List[str] = ocr_lang
SCREAMING_SNAKE_CASE__ : Tuple = tesseract_config
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
SCREAMING_SNAKE_CASE__ : Any = (size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ) -> PIL.Image.Image:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE__ : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE__ : Dict = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE__ : Optional[int] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Dict = []
for image in images:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = apply_tesseract(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
words_batch.append(SCREAMING_SNAKE_CASE__ )
boxes_batch.append(SCREAMING_SNAKE_CASE__ )
if do_resize:
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
        # flip color channels from RGB to BGR (Detectron2 expects BGR input)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [flip_channel_order(SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
SCREAMING_SNAKE_CASE__ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE__ )
if apply_ocr:
SCREAMING_SNAKE_CASE__ : List[Any] = words_batch
SCREAMING_SNAKE_CASE__ : List[str] = boxes_batch
return data
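    # Hedged usage sketch: with apply_ocr=True, preprocessing an image returns a
    # BatchFeature carrying features["pixel_values"] plus the OCR results above
    # (stored under the "words" and "boxes" keys in the upstream processor).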
| 25 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
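        # With the defaults above (12 layers, 16x16 patches, strides of 10, 128 mel
        # bins, 1024 frames) this matches the AudioSet-finetuned AST checkpoint
        # referenced in the archive map.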
| 25 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(poly_a or [0] )[:]
SCREAMING_SNAKE_CASE__ : Tuple = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
SCREAMING_SNAKE_CASE__ : int = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
SCREAMING_SNAKE_CASE__ : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
SCREAMING_SNAKE_CASE__ : Optional[int] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
SCREAMING_SNAKE_CASE__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
SCREAMING_SNAKE_CASE__ : Tuple = self.__multiply()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB]
# Corner case
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return dft[0]
        # Cooley-Tukey butterfly passes: each iteration halves the number of rows in the DFT table
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.c_max_length // 2
while next_ncol > 0:
SCREAMING_SNAKE_CASE__ : Any = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root**next_ncol
# First half of next step
SCREAMING_SNAKE_CASE__ : str = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
SCREAMING_SNAKE_CASE__ : int = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(SCREAMING_SNAKE_CASE__ ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_dft
SCREAMING_SNAKE_CASE__ : Tuple = next_ncol // 2
return dft[0]
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__dft("""A""" )
SCREAMING_SNAKE_CASE__ : Dict = self.__dft("""B""" )
SCREAMING_SNAKE_CASE__ : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
SCREAMING_SNAKE_CASE__ : Optional[Any] = 2
while next_ncol <= self.c_max_length:
SCREAMING_SNAKE_CASE__ : List[str] = [[] for i in range(SCREAMING_SNAKE_CASE__ )]
SCREAMING_SNAKE_CASE__ : Tuple = self.root ** (next_ncol // 2)
SCREAMING_SNAKE_CASE__ : Any = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
SCREAMING_SNAKE_CASE__ : Optional[Any] = new_inverse_c
next_ncol *= 2
# Unpack
SCREAMING_SNAKE_CASE__ : Optional[Any] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__(self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = """A = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
SCREAMING_SNAKE_CASE__ : int = """A*B = """ + """ + """.join(
F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
return F'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
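    # Worked check for the FFT multiplier above: (1 + 2x) * (3 + 4x) expands to
    # 3 + 10x + 8x^2, so a product built from poly_a=[1, 2] and poly_b=[3, 4]
    # should print coefficients close to 3, 10 and 8 (tiny imaginary parts aside).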
| 25 | 1 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" ,["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" ,["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" ,[None, """v2"""] )
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = hf_hub_url(repo_id=_snake_case ,path=_snake_case ,revision=_snake_case )
assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(_snake_case )}'''
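    # For instance, hf_hub_url("org-name/dataset-name", "filename with blanks.csv", "v2")
    # resolves to
    # https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv
    # since quote() percent-encodes the spaces in the path.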
| 25 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch """
"""helper utility that will spawn up """
"""multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" ,type=_snake_case ,default=1 ,help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" ,type=_snake_case ,help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) ,)
# rest from the training program
parser.add_argument("""training_script_args""" ,nargs=_snake_case )
return parser.parse_args()
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : int = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE__ : Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE__ : int = script_fpath.stem
SCREAMING_SNAKE_CASE__ : Optional[Any] = importlib.import_module(_snake_case )
# Patch sys.argv
SCREAMING_SNAKE_CASE__ : str = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores )
if __name__ == "__main__":
main()
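    # Hedged usage sketch (launcher filename assumed): running
    #   python xla_spawn.py --num_cores 8 train.py --lr 3e-5
    # imports train.py as a module and spawns train._mp_fn on 8 TPU cores with
    # sys.argv == ["train.py", "--lr", "3e-5", "--tpu_num_cores", "8"].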
| 25 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[Any] = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCAmelCase__ : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
UpperCAmelCase__ : str = {'facebook/blenderbot-3B': 1_2_8}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Dict = (
list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) )
)
SCREAMING_SNAKE_CASE__ : List[Any] = bs[:]
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(_snake_case )
cs.append(2**8 + n )
n += 1
SCREAMING_SNAKE_CASE__ : List[Any] = [chr(_snake_case ) for n in cs]
return dict(zip(_snake_case ,_snake_case ) )
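# Concretely, bytes in the kept printable ranges map to themselves while the
# rest shift into unused code points: byte 32 (space) becomes "Ġ" (chr(256 + 32))
# and byte 10 (newline) becomes "Ċ" (chr(256 + 10)), keeping every byte reversible.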
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = set()
SCREAMING_SNAKE_CASE__ : Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE__ : Tuple = char
return pairs
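# Example: get_pairs(("l", "o", "w", "e", "r")) returns the symbol bigrams
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}.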
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = VOCAB_FILES_NAMES
__UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Tuple = ['''input_ids''', '''attention_mask''']
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
SCREAMING_SNAKE_CASE__ : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
SCREAMING_SNAKE_CASE__ : str = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
SCREAMING_SNAKE_CASE__ : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
super().__init__(
errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as vocab_handle:
SCREAMING_SNAKE_CASE__ : int = json.load(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE__ : List[str] = bytes_to_unicode()
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""" ) as merges_handle:
SCREAMING_SNAKE_CASE__ : Dict = merges_handle.read().split("""\n""" )[1:-1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
SCREAMING_SNAKE_CASE__ : List[str] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def __magic_name__ (self ) -> int:
"""simple docstring"""
return len(self.encoder )
def __magic_name__ (self ) -> int:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ : int = tuple(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_pairs(SCREAMING_SNAKE_CASE__ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = bigram
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
while i < len(SCREAMING_SNAKE_CASE__ ):
try:
SCREAMING_SNAKE_CASE__ : Tuple = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ : Dict = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ : str = tuple(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = new_word
if len(SCREAMING_SNAKE_CASE__ ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ : Tuple = get_pairs(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = """ """.join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = word
return word
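    # Worked trace of one merge pass above: for the word ("h", "e", "l", "l", "o"),
    # if ("l", "l") is the lowest-ranked bigram in bpe_ranks, the word becomes
    # ("h", "e", "ll", "o") and the loop repeats until no ranked pair remains.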
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = []
for token in re.findall(self.pat , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE__ ).split(""" """ ) )
return bpe_tokens
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
"""simple docstring"""
return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return self.decoder.get(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """""".join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + """\n""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
SCREAMING_SNAKE_CASE__ : str = token_index
writer.write(""" """.join(SCREAMING_SNAKE_CASE__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=SCREAMING_SNAKE_CASE__ , token_ids_1=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE__ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE__ : List[Any] = """ """ + text
return (text, kwargs)
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> int:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
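    # Blenderbot only appends EOS, so e.g. token ids [5, 6] become
    # [5, 6, eos_token_id]; no BOS/CLS token is prepended by this method.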
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
                # Prefix a space, as Blenderbot does internally
inputs.append(""" """ + text )
else:
                # Generated responses already contain the leading space.
inputs.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = """ """.join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = self.encode(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length:
SCREAMING_SNAKE_CASE__ : Any = input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ,_snake_case ):
    return 1 if input_1 == input_2 else 0
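# Equivalently, for 0/1 inputs XNOR is the complement of XOR:
# 1 - (input_1 ^ input_2).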
def lowercase_ ( ):
assert xnor_gate(0 ,0 ) == 1
assert xnor_gate(0 ,1 ) == 0
assert xnor_gate(1 ,0 ) == 0
assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = [True] * limit
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : int = False
SCREAMING_SNAKE_CASE__ : List[Any] = True
for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
SCREAMING_SNAKE_CASE__ : Tuple = i * 2
while index < limit:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : int = index + i
SCREAMING_SNAKE_CASE__ : Tuple = [2]
for i in range(3 ,_snake_case ,2 ):
if is_prime[i]:
primes.append(_snake_case )
return primes
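# Quick check of the sieve above: prime_sieve(10) yields [2, 3, 5, 7], since 9
# is marked composite on the i == 3 pass.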
def lowercase_ ( _snake_case = 1_000_000 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prime_sieve(_snake_case )
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(len(_snake_case ) ):
for j in range(i + length ,len(_snake_case ) ):
SCREAMING_SNAKE_CASE__ : int = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
SCREAMING_SNAKE_CASE__ : str = j - i
SCREAMING_SNAKE_CASE__ : str = sol
return largest
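# Worked check at a small ceiling: below 100 the longest run of consecutive
# primes summing to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41, so solution(100)
# should return 41.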
if __name__ == "__main__":
print(f"""{solution() = }""")
| 25 |
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
UpperCAmelCase__ : Optional[int] = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
UpperCAmelCase__ : List[Any] = logging.WARNING
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.getenv("""DATASETS_VERBOSITY""" ,_snake_case )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: {', '.join(log_levels.keys())}''' )
return _default_log_level
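# For example, running a process with DATASETS_VERBOSITY=error makes the
# library root logger start at logging.ERROR instead of the default WARNING.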
def lowercase_ ( ):
return __name__.split(""".""" )[0]
def lowercase_ ( ):
return logging.getLogger(_get_library_name() )
def lowercase_ ( ):
# Apply our default configuration to the library root logger.
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def lowercase_ ( _snake_case = None ):
if name is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = _get_library_name()
return logging.getLogger(_snake_case )
def lowercase_ ( ):
return _get_library_root_logger().getEffectiveLevel()
def lowercase_ ( _snake_case ):
_get_library_root_logger().setLevel(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
return set_verbosity(_snake_case )
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : Tuple = False
def lowercase_ ( ):
SCREAMING_SNAKE_CASE__ : str = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class lowerCAmelCase_ :
"""simple docstring"""
def __init__(self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int: # pylint: disable=unused-argument
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = args[0] if args else None
def __iter__(self ) -> int:
"""simple docstring"""
return iter(self._iterator )
def __getattr__(self , SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__(self ) -> Dict:
"""simple docstring"""
return self
def __exit__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
return
UpperCAmelCase__ : str = True
class lowerCAmelCase_ :
"""simple docstring"""
def __call__(self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
UpperCAmelCase__ : Tuple = _tqdm_cls()
def lowercase_ ( ):
global _tqdm_active
return bool(_tqdm_active )
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
def lowercase_ ( ):
global _tqdm_active
SCREAMING_SNAKE_CASE__ : str = False
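# So library code can write `for x in tqdm(iterable):` unconditionally; when
# progress bars are disabled, the EmptyTqdm shim silently swallows every
# tqdm call made through the wrapper above.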
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = False ):
if radian_mode:
return [magnitude * cos(_snake_case ), magnitude * sin(_snake_case )]
return [magnitude * cos(radians(_snake_case ) ), magnitude * sin(radians(_snake_case ) )]
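# Worked example: polar_force(10, 90) is approximately [0.0, 10.0], since
# cos(radians(90)) is ~0 and sin(radians(90)) is 1; with radian_mode=True the
# angle is taken in radians directly.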
def lowercase_ ( _snake_case ,_snake_case ,_snake_case = 10**-1 ):
SCREAMING_SNAKE_CASE__ : NDArray[floataa] = cross(_snake_case ,_snake_case )
SCREAMING_SNAKE_CASE__ : float = sum(_snake_case )
return abs(_snake_case ) < eps
if __name__ == "__main__":
# Test to check if it works
UpperCAmelCase__ : int = array(
[
polar_force(718.4, 1_8_0 - 3_0),
polar_force(879.54, 4_5),
polar_force(1_0_0, -9_0),
]
)
UpperCAmelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCAmelCase__ : List[str] = array(
[
polar_force(3_0 * 9.81, 1_5),
polar_force(2_1_5, 1_8_0 - 4_5),
polar_force(2_6_4, 9_0 - 3_0),
]
)
UpperCAmelCase__ : Optional[int] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCAmelCase__ : str = array([[0, -2_0_0_0], [0, -1_2_0_0], [0, 1_5_6_0_0], [0, -1_2_4_0_0]])
UpperCAmelCase__ : Tuple = array([[0, 0], [6, 0], [1_0, 0], [1_2, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 25 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
UpperCAmelCase__ : Optional[int] = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : int = '''yolos'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[5_12, 8_64] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : List[str] = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = num_detection_tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_mid_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : Optional[Any] = class_cost
SCREAMING_SNAKE_CASE__ : List[str] = bbox_cost
SCREAMING_SNAKE_CASE__ : List[Any] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[str] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = eos_coefficient
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Dict = version.parse('''1.11''' )
@property
def __magic_name__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __magic_name__ (self ) -> float:
"""simple docstring"""
return 1E-4
@property
def __magic_name__ (self ) -> int:
"""simple docstring"""
return 12
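    # Together, the three properties above declare a single dynamic "pixel_values"
    # input for ONNX export, an output-validation tolerance of 1e-4, and a
    # minimum opset of 12.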
| 25 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : List[str] = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : int = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
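# With this lazy setup, importing the package stays cheap: the heavy
# torch/tf/flax modules are only imported when one of the exported Albert
# symbols is first accessed through the _LazyModule proxy.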
| 25 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = 384
SCREAMING_SNAKE_CASE__ : Tuple = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE__ : int = 96
SCREAMING_SNAKE_CASE__ : str = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE__ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 96
SCREAMING_SNAKE_CASE__ : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : Tuple = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE__ : Tuple = 128
SCREAMING_SNAKE_CASE__ : List[Any] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE__ : Optional[int] = 12
SCREAMING_SNAKE_CASE__ : Optional[int] = 512
elif "large" in model_name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 192
SCREAMING_SNAKE_CASE__ : int = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE__ : int = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE__ : List[Any] = 12
SCREAMING_SNAKE_CASE__ : Optional[Any] = 768
# set label information
SCREAMING_SNAKE_CASE__ : Optional[Any] = 150
SCREAMING_SNAKE_CASE__ : Tuple = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE__ : str = json.load(open(hf_hub_download(_snake_case ,_snake_case ,repo_type="""dataset""" ) ,"""r""" ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {int(_snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[Any] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : str = SwinConfig(
embed_dim=_snake_case ,depths=_snake_case ,num_heads=_snake_case ,window_size=_snake_case ,out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ,)
SCREAMING_SNAKE_CASE__ : int = UperNetConfig(
backbone_config=_snake_case ,auxiliary_in_channels=_snake_case ,num_labels=_snake_case ,idalabel=_snake_case ,labelaid=_snake_case ,)
return config
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = dct.pop(_snake_case )
SCREAMING_SNAKE_CASE__ : Tuple = val
def lowercase_ ( _snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
            SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[:dim, :]
            SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[:dim]
            SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[dim : dim * 2, :]
            SCREAMING_SNAKE_CASE__ : List[Any] = in_proj_bias[dim : dim * 2]
            SCREAMING_SNAKE_CASE__ : Tuple = in_proj_weight[-dim:, :]
            SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-dim:]
# fmt: on
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = x.shape
SCREAMING_SNAKE_CASE__ : List[Any] = x.reshape(_snake_case ,4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Dict = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = x.shape
SCREAMING_SNAKE_CASE__ : Any = x.reshape(_snake_case ,in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(_snake_case ,_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(4 ,in_channel // 4 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(_snake_case )
return x
def lowercase_ ( _snake_case ):
SCREAMING_SNAKE_CASE__ : int = x.shape[0]
SCREAMING_SNAKE_CASE__ : List[str] = x.reshape(in_channel // 4 ,4 )
SCREAMING_SNAKE_CASE__ : Tuple = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(_snake_case )
return x
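# The four helpers above regroup Swin's patch-merging channels: downsampling
# concatenates a 2x2 neighborhood, and the mmseg and HF checkpoints interleave
# those four positions differently, hence the [0, 2, 1, 3] permutation applied
# in both directions to the reduction weights and norm parameters.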
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE__ : Optional[int] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" ,file_name=_snake_case )[
"""state_dict"""
]
for name, param in state_dict.items():
print(_snake_case ,param.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_upernet_config(_snake_case )
SCREAMING_SNAKE_CASE__ : List[str] = UperNetForSemanticSegmentation(_snake_case )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(_snake_case )
if "bn" in key:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""bn""" ,"""batch_norm""" )
SCREAMING_SNAKE_CASE__ : Dict = val
# rename keys
SCREAMING_SNAKE_CASE__ : str = create_rename_keys(_snake_case )
for src, dest in rename_keys:
rename_key(_snake_case ,_snake_case ,_snake_case )
read_in_q_k_v(_snake_case ,config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = reverse_correct_unfold_reduction_order(_snake_case )
if "norm" in key:
SCREAMING_SNAKE_CASE__ : Tuple = reverse_correct_unfold_norm_order(_snake_case )
model.load_state_dict(_snake_case )
# verify on image
SCREAMING_SNAKE_CASE__ : List[str] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(_snake_case ,stream=_snake_case ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE__ : Optional[int] = processor(_snake_case ,return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.logits
print(logits.shape )
print("""First values of logits:""" ,logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("""Logits:""" ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,_snake_case ,atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCAmelCase__ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out because RoBERTa doesn't use token type embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 25 |
"""simple docstring"""
import math
import unittest
def lowercase_ ( _snake_case ):
assert isinstance(_snake_case ,_snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 ,int(math.sqrt(_snake_case ) + 1 ) ,6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class lowerCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __magic_name__ (self ) -> Dict:
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __magic_name__ (self ) -> List[Any]:
"""simple docstring"""
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 25 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def lowercase_ ( ):
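    # Incremental sieve of Eratosthenes: factor_map maps each upcoming
    # composite to one of its prime factors, so any candidate missing from
    # the map must be prime.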
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 2
while True:
SCREAMING_SNAKE_CASE__ : str = factor_map.pop(_snake_case ,_snake_case )
if factor:
SCREAMING_SNAKE_CASE__ : List[str] = factor + prime
while x in factor_map:
x += factor
SCREAMING_SNAKE_CASE__ : Tuple = factor
else:
SCREAMING_SNAKE_CASE__ : str = prime
yield prime
prime += 1
def lowercase_ ( _snake_case = 1E10 ):
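    # For the n-th prime p with n odd, ((p - 1)**n + (p + 1)**n) % p**2 is
    # 2 * n * p, so we look for the first odd n whose remainder 2 * n * p
    # exceeds the limit (cf. Project Euler 123).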
SCREAMING_SNAKE_CASE__ : List[str] = sieve()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
while True:
SCREAMING_SNAKE_CASE__ : List[str] = next(_snake_case )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime, as the remainder will be 2.
next(_snake_case )
n += 2
if __name__ == "__main__":
print(solution())
| 25 |
"""simple docstring"""
def lowercase_ ( _snake_case ):
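    # Three-pointer dynamic programming: the next ugly number is always the
    # smallest pending multiple (by 2, 3 or 5) of an earlier ugly number.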
SCREAMING_SNAKE_CASE__ : Optional[int] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = 0, 0, 0
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 5
for _ in range(1 ,_snake_case ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = min(_snake_case ,_snake_case ,_snake_case )
ugly_nums.append(_snake_case )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[int] = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(2_0_0) = }""")
| 25 | 1 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase_ :
"""simple docstring"""
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = DDPMScheduler(
num_train_timesteps=10_00 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[str] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = inputs["""prompt"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["""generator"""]
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE__ : List[Any] = inputs["""output_type"""]
if "image" in inputs:
SCREAMING_SNAKE_CASE__ : Any = inputs["""image"""]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if "mask_image" in inputs:
SCREAMING_SNAKE_CASE__ : str = inputs["""mask_image"""]
else:
SCREAMING_SNAKE_CASE__ : str = None
if "original_image" in inputs:
SCREAMING_SNAKE_CASE__ : str = inputs["""original_image"""]
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = pipe.encode_prompt(SCREAMING_SNAKE_CASE__ )
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = image
if mask_image is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE__ : str = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = pipe(**SCREAMING_SNAKE_CASE__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipe_loaded.to(SCREAMING_SNAKE_CASE__ )
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = inputs["""generator"""]
SCREAMING_SNAKE_CASE__ : Any = inputs["""num_inference_steps"""]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs["""output_type"""]
# inputs with prompt converted to embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
SCREAMING_SNAKE_CASE__ : Any = image
if mask_image is not None:
SCREAMING_SNAKE_CASE__ : int = mask_image
if original_image is not None:
SCREAMING_SNAKE_CASE__ : str = original_image
SCREAMING_SNAKE_CASE__ : List[str] = pipe_loaded(**SCREAMING_SNAKE_CASE__ )[0]
SCREAMING_SNAKE_CASE__ : Dict = np.abs(to_np(SCREAMING_SNAKE_CASE__ ) - to_np(SCREAMING_SNAKE_CASE__ ) ).max()
self.assertLess(SCREAMING_SNAKE_CASE__ , 1E-4 )
def __magic_name__ (self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = pipe(**SCREAMING_SNAKE_CASE__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.pipeline_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
pipe_loaded.to(SCREAMING_SNAKE_CASE__ )
pipe_loaded.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = pipe_loaded(**SCREAMING_SNAKE_CASE__ )[0]
SCREAMING_SNAKE_CASE__ : Tuple = np.abs(to_np(SCREAMING_SNAKE_CASE__ ) - to_np(SCREAMING_SNAKE_CASE__ ) ).max()
self.assertLess(SCREAMING_SNAKE_CASE__ , 1E-4 )
| 25 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = '''audio-spectrogram-transformer'''
def __init__(self , SCREAMING_SNAKE_CASE__=7_68 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=30_72 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=10_24 , SCREAMING_SNAKE_CASE__=1_28 , **SCREAMING_SNAKE_CASE__ , ) -> Tuple:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
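        # ViT-style transformer hyperparameters, plus AST-specific strides of
        # the patch grid along the frequency and time axes of the spectrogram.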
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : int = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = patch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = qkv_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = frequency_stride
SCREAMING_SNAKE_CASE__ : Any = time_stride
SCREAMING_SNAKE_CASE__ : Optional[int] = max_length
SCREAMING_SNAKE_CASE__ : Any = num_mel_bins
| 25 | 1 |
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
def lowercase_ ( ):
# Get the sagemaker specific mp parameters from smp_options variable.
SCREAMING_SNAKE_CASE__ : Dict = os.getenv("""SM_HP_MP_PARAMETERS""" ,"""{}""" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
SCREAMING_SNAKE_CASE__ : List[str] = json.loads(_snake_case )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
SCREAMING_SNAKE_CASE__ : int = os.getenv("""SM_FRAMEWORK_PARAMS""" ,"""{}""" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.loads(_snake_case )
if not mpi_options.get("""sagemaker_mpi_enabled""" ,_snake_case ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : str = field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def __magic_name__ (self ) -> Optional[int]:
"""simple docstring"""
super().__post_init__()
warnings.warn(
"""`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
"""`TrainingArguments` instead.""" , SCREAMING_SNAKE_CASE__ , )
@cached_property
def __magic_name__ (self ) -> "torch.device":
"""simple docstring"""
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.device("""cpu""" )
SCREAMING_SNAKE_CASE__ : int = 0
elif is_sagemaker_model_parallel_available():
SCREAMING_SNAKE_CASE__ : Optional[int] = smp.local_rank()
SCREAMING_SNAKE_CASE__ : Any = torch.device("""cuda""" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE__ : str = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
SCREAMING_SNAKE_CASE__ : Dict = torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
SCREAMING_SNAKE_CASE__ : List[str] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
SCREAMING_SNAKE_CASE__ : List[str] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" , timeout=self.ddp_timeout_delta )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.device("""cuda""" , self.local_rank )
SCREAMING_SNAKE_CASE__ : str = 1
if device.type == "cuda":
torch.cuda.set_device(SCREAMING_SNAKE_CASE__ )
return device
@property
def __magic_name__ (self ) -> str:
"""simple docstring"""
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return not is_sagemaker_model_parallel_available()
@property
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
return False
| 25 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def lowercase_ ( _snake_case ):
    # encoder.embeddings are copied twice in the original FLAVA checkpoint
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def lowercase_ ( _snake_case ,_snake_case ):
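    # Rename keys from the original FLAVA checkpoint layout to the Hugging Face
    # layout, cast weights to float32, and merge the codebook weights unchanged.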
SCREAMING_SNAKE_CASE__ : Any = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" ,"""mmm_image_head""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" ,"""mmm_text_head""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""heads.cmd.itm_head.cls""" ,"""itm_head""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.cmd.itm_head.pooler""" ,"""itm_head.pooler""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""heads.cmd.clip_head.logit_scale""" ,"""flava.logit_scale""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""heads.fairseq_mlm.cls.predictions""" ,"""mlm_head""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""heads.imagenet.mim_head.cls.predictions""" ,"""mim_head""" )
SCREAMING_SNAKE_CASE__ : List[str] = key.replace("""mm_text_projection""" ,"""flava.text_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_image_projection""" ,"""flava.image_to_mm_projection""" )
SCREAMING_SNAKE_CASE__ : str = key.replace("""image_encoder.module""" ,"""flava.image_model""" )
SCREAMING_SNAKE_CASE__ : Tuple = key.replace("""text_encoder.module""" ,"""flava.text_model""" )
SCREAMING_SNAKE_CASE__ : int = key.replace("""mm_encoder.module.encoder.cls_token""" ,"""flava.multimodal_model.cls_token""" )
SCREAMING_SNAKE_CASE__ : Dict = key.replace("""mm_encoder.module""" ,"""flava.multimodal_model""" )
SCREAMING_SNAKE_CASE__ : Any = key.replace("""text_projection""" ,"""flava.text_projection""" )
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace("""image_projection""" ,"""flava.image_projection""" )
SCREAMING_SNAKE_CASE__ : Tuple = value.float()
for key, value in codebook_state_dict.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = value
return upgrade
@torch.no_grad()
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ,_snake_case=None ):
if config_path is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = FlavaConfig.from_pretrained(_snake_case )
else:
SCREAMING_SNAKE_CASE__ : List[str] = FlavaConfig()
SCREAMING_SNAKE_CASE__ : Optional[int] = FlavaForPreTraining(_snake_case ).eval()
SCREAMING_SNAKE_CASE__ : List[Any] = convert_dalle_checkpoint(_snake_case ,_snake_case ,save_checkpoint=_snake_case )
if os.path.exists(_snake_case ):
SCREAMING_SNAKE_CASE__ : List[str] = torch.load(_snake_case ,map_location="""cpu""" )
else:
SCREAMING_SNAKE_CASE__ : Tuple = torch.hub.load_state_dict_from_url(_snake_case ,map_location="""cpu""" )
SCREAMING_SNAKE_CASE__ : Dict = upgrade_state_dict(_snake_case ,_snake_case )
hf_model.load_state_dict(_snake_case )
SCREAMING_SNAKE_CASE__ : Any = hf_model.state_dict()
SCREAMING_SNAKE_CASE__ : Any = count_parameters(_snake_case )
SCREAMING_SNAKE_CASE__ : str = count_parameters(_snake_case ) + count_parameters(_snake_case )
assert torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
hf_model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
UpperCAmelCase__ : Optional[int] = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 25 | 1 |
"""simple docstring"""
import operator
def lowercase_ ( _snake_case ,_snake_case = False ,_snake_case = None ):
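    # Strand sort: repeatedly peel an already-ordered "strand" off the input,
    # then merge each strand into the growing solution list.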
SCREAMING_SNAKE_CASE__ : Dict = operator.lt if reverse else operator.gt
SCREAMING_SNAKE_CASE__ : Dict = solution or []
if not arr:
return solution
SCREAMING_SNAKE_CASE__ : List[str] = [arr.pop(0 )]
for i, item in enumerate(_snake_case ):
if _operator(_snake_case ,sublist[-1] ):
sublist.append(_snake_case )
arr.pop(_snake_case )
# merging sublist into solution list
if not solution:
solution.extend(_snake_case )
else:
while sublist:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sublist.pop(0 )
for i, xx in enumerate(_snake_case ):
if not _operator(_snake_case ,_snake_case ):
solution.insert(_snake_case ,_snake_case )
break
else:
solution.append(_snake_case )
strand_sort(_snake_case ,_snake_case ,_snake_case )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 25 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = 'Hello world! cécé herlolip'
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
SCREAMING_SNAKE_CASE__ : int = FairseqRobertaModel.from_pretrained(_snake_case )
roberta.eval() # disable dropout
SCREAMING_SNAKE_CASE__ : Any = roberta.model.encoder.sentence_encoder
SCREAMING_SNAKE_CASE__ : Any = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1E-5 ,)
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_snake_case )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaXLForSequenceClassification(_snake_case ) if classification_head else XLMRobertaXLForMaskedLM(_snake_case )
model.eval()
# Now let's copy all the weights.
# Embeddings
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.embed_tokens.weight
SCREAMING_SNAKE_CASE__ : int = roberta_sent_encoder.embed_positions.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_sent_encoder.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
SCREAMING_SNAKE_CASE__ : BertLayer = model.roberta.encoder.layer[i]
SCREAMING_SNAKE_CASE__ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
SCREAMING_SNAKE_CASE__ : RobertaAttention = layer.attention
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn_layer_norm.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.self_attn_layer_norm.bias
        # self-attention
SCREAMING_SNAKE_CASE__ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.q_proj.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.q_proj.bias
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.self_attn.k_proj.weight
SCREAMING_SNAKE_CASE__ : int = roberta_layer.self_attn.k_proj.bias
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.v_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
SCREAMING_SNAKE_CASE__ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta_layer.self_attn.out_proj.bias
        # this one is the final layer norm
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.final_layer_norm.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.final_layer_norm.bias
# intermediate
SCREAMING_SNAKE_CASE__ : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.bias
# output
SCREAMING_SNAKE_CASE__ : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
SCREAMING_SNAKE_CASE__ : Tuple = roberta_layer.fca.weight
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta_layer.fca.bias
# end of layer
if classification_head:
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
SCREAMING_SNAKE_CASE__ : Optional[Any] = roberta.model.classification_heads["""mnli"""].out_proj.weight
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
SCREAMING_SNAKE_CASE__ : str = roberta.model.encoder.lm_head.dense.weight
SCREAMING_SNAKE_CASE__ : List[Any] = roberta.model.encoder.lm_head.dense.bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.encoder.lm_head.layer_norm.bias
SCREAMING_SNAKE_CASE__ : Optional[int] = roberta.model.encoder.lm_head.weight
SCREAMING_SNAKE_CASE__ : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
SCREAMING_SNAKE_CASE__ : torch.Tensor = roberta.encode(_snake_case ).unsqueeze(0 ) # batch of size 1
SCREAMING_SNAKE_CASE__ : Tuple = model(_snake_case )[0]
if classification_head:
SCREAMING_SNAKE_CASE__ : Dict = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_snake_case ) )
else:
SCREAMING_SNAKE_CASE__ : Tuple = roberta.model(_snake_case )[0]
print(our_output.shape ,their_output.shape )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
SCREAMING_SNAKE_CASE__ : Tuple = torch.allclose(_snake_case ,_snake_case ,atol=1E-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_snake_case ).mkdir(parents=_snake_case ,exist_ok=_snake_case )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_snake_case )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path to the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 25 | 1 |
"""simple docstring"""
import os
UpperCAmelCase__ : List[str] = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
def lowercase_ ( _snake_case ):
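    # Subtractive notation: a symbol smaller than its successor (e.g. the I
    # in IV) is subtracted from the running total instead of added.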
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ : Dict = 0
while index < len(_snake_case ) - 1:
SCREAMING_SNAKE_CASE__ : List[Any] = SYMBOLS[numerals[index]]
SCREAMING_SNAKE_CASE__ : str = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def lowercase_ ( _snake_case ):
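    # Greedy construction from most to least significant digit, with the
    # subtractive pairs (CM, CD, XC, XL, IX, IV) handled explicitly.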
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """"""
SCREAMING_SNAKE_CASE__ : str = num // 1_000
numerals += m_count * "M"
num %= 1_000
SCREAMING_SNAKE_CASE__ : str = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
SCREAMING_SNAKE_CASE__ : int = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def lowercase_ ( _snake_case = "/p089_roman.txt" ):
SCREAMING_SNAKE_CASE__ : Dict = 0
with open(os.path.dirname(_snake_case ) + roman_numerals_filename ) as filea:
SCREAMING_SNAKE_CASE__ : Optional[int] = filea.readlines()
for line in lines:
SCREAMING_SNAKE_CASE__ : List[Any] = line.strip()
SCREAMING_SNAKE_CASE__ : str = parse_roman_numerals(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[int] = generate_roman_numerals(_snake_case )
savings += len(_snake_case ) - len(_snake_case )
return savings
if __name__ == "__main__":
print(f"""{solution() = }""")
| 25 |
"""simple docstring"""
UpperCAmelCase__ : List[str] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
UpperCAmelCase__ : int = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Tuple = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
UpperCAmelCase__ : Union[str, Any] = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
UpperCAmelCase__ : str = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
| 25 | 1 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = config_class
SCREAMING_SNAKE_CASE__ : int = has_text_modality
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs
SCREAMING_SNAKE_CASE__ : Optional[Any] = common_properties
def __magic_name__ (self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE__ : Dict = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , msg=F'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(SCREAMING_SNAKE_CASE__ ):
try:
setattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , msg=F'''`{name} value {idx} expected, but was {getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(SCREAMING_SNAKE_CASE__ ):
try:
SCREAMING_SNAKE_CASE__ : Tuple = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , msg=F'''`{name} value {idx} expected, but was {getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE__ : Any = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Any = os.path.join(SCREAMING_SNAKE_CASE__ , """config.json""" )
config_first.to_json_file(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = self.config_class.from_json_file(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __magic_name__ (self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = self.config_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_class(**self.inputs_dict )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
config_first.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = self.config_class.from_pretrained(SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def __magic_name__ (self ) -> Any:
"""simple docstring"""
if self.config_class.is_composition:
return
SCREAMING_SNAKE_CASE__ : str = self.config_class()
self.parent.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = copy.deepcopy(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.config_class(**SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) != value:
wrong_values.append((key, getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), value) )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """\n""".join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def __magic_name__ (self ) -> Optional[Any]:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 25 |
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_tf_ops.py
UpperCAmelCase__ : List[str] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
UpperCAmelCase__ : List[Any] = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowercase_ ( _snake_case ,_snake_case ,_snake_case ):
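    # Collect every op used by the SavedModel and flag the ones covered neither
    # by the requested ONNX opset nor by the internal-ops whitelist.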
SCREAMING_SNAKE_CASE__ : List[str] = SavedModel()
SCREAMING_SNAKE_CASE__ : Dict = []
with open(os.path.join(_snake_case ,"""utils""" ,"""tf_ops""" ,"""onnx.json""" ) ) as f:
SCREAMING_SNAKE_CASE__ : Any = json.load(_snake_case )["""opsets"""]
for i in range(1 ,opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_snake_case )] )
with open(_snake_case ,"""rb""" ) as f:
saved_model.ParseFromString(f.read() )
SCREAMING_SNAKE_CASE__ : List[str] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
    # Convert to a sorted list
SCREAMING_SNAKE_CASE__ : int = sorted(_snake_case )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_snake_case )
if strict and len(_snake_case ) > 0:
        raise Exception(f'''Found the following incompatible ops for the opset {opset}:\n''' + """\n""".join(incompatible_ops ) )
elif len(_snake_case ) > 0:
print(f'''Found the following incompatible ops for the opset {opset}:''' )
print(*_snake_case ,sep="""\n""" )
else:
print(f'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
UpperCAmelCase__ : Dict = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
| 25 | 1 |