| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of ``input_string`` along a zigzag (rail fence) of height ``key``."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on ``key``, fills it with the characters of ``input_string``, then reads it back in zigzag order."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Attempts decryption with every key from 1 to ``len(input_string) - 1``."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
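    # Hedged usage sketch (function names as restored above): encrypt/decrypt
    # round-trip, and bruteforce recovers the plaintext at the matching key.
    example_msg = "WE ARE DISCOVERED FLEE AT ONCE"
    assert decrypt(encrypt(example_msg, 4), 4) == example_msg
    assert bruteforce(encrypt(example_msg, 4))[4] == example_msg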
| 5 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline using any `AutoModelForDepthEstimation` checkpoint."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images, **kwargs):
        """Predict the depth map of the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # No extra parameters: one empty dict each for preprocess, forward and postprocess.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # Resize the raw prediction back to the original image size (PIL size is (width, height)).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
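# Hedged usage sketch for the pipeline above; the checkpoint name is an
# assumption, any depth-estimation model on the Hub works the same way:
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     outputs["depth"].save("depth.png")      # uint8 PIL image, scaled to 0-255
#     outputs["predicted_depth"]              # raw depth tensor from the model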
| 25 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class StableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
    from .pipeline_stable_diffusion_instruct_pix2pix import StableDiffusionInstructPix2PixPipeline
    from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
    from .pipeline_stable_diffusion_ldm3d import StableDiffusionLDM3DPipeline
    from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
    from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
    from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
    from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
    from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
    from .pipeline_stable_unclip import StableUnCLIPPipeline
    from .pipeline_stable_unclip_img2img import StableUnCLIPImg2ImgPipeline
    from .safety_checker import StableDiffusionSafetyChecker
    from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
        and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 100 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 100 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
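    # With this structure, `import transformers.models.mbart` stays cheap: the
    # heavy submodules listed in `_import_structure` are only imported on first
    # attribute access. Hedged sketch (checkpoint name shown for illustration):
    #
    #     from transformers import MBartForConditionalGeneration
    #     model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")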
| 10 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "ImageClassification":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
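# Hedged usage sketch of aligning the template with a dataset's features
# (the label names below are illustrative):
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification(image_column="image", label_column="labels")
#     task = task.align_with_features(features)
#     task.label_schema["labels"].names  # ['cat', 'dog']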
| 10 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
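# Minimal usage sketch (values shown are the defaults restored above):
#
#     config = RoCBertConfig(vocab_size=30522, hidden_size=768)
#     config.model_type     # "roc_bert"
#     config.concat_input   # True -> pronunciation/shape embeddings are concatenated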
| 16 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> str:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Tuple = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : str = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Dict = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[str] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[int] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Dict:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Any = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : List[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Any:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
class _A ( metaclass=__SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Optional[Any] = ["sentencepiece"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["""sentencepiece"""] )
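# The dummy classes above let `from transformers import <tokenizer>` succeed
# even when sentencepiece is not installed; the failure is deferred to
# instantiation. Hedged sketch with a hypothetical class name:
#
#     class SomeTokenizer(metaclass=DummyObject):
#         _backends = ["sentencepiece"]
#
#     SomeTokenizer()  # raises ImportError asking to `pip install sentencepiece`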
| 16 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
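# For orientation, the happy path these tests exercise (hedged sketch; the URL
# is a placeholder):
#
#     download_config = DownloadConfig(cache_dir="./cache", extract_compressed_file=True)
#     local_path = cached_path("https://example.com/data.txt.gz", download_config=download_config)
#     # `local_path` points at the extracted file inside the cache directory.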
| 248 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    """Trainer subclass that runs answer post-processing before computing QA metrics."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
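# Minimal wiring sketch (all argument values are placeholders):
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate()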
| 248 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")


class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
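# Hedged usage sketch (checkpoint name and output shape are assumptions based
# on the defaults above: 16 frames, 3 channels, 224x224 crops):
#
#     import numpy as np
#     processor = VideoMAEImageProcessor.from_pretrained("MCG-NJU/videomae-base")
#     video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
#     inputs = processor(video, return_tensors="pt")
#     inputs["pixel_values"].shape  # roughly (1, 16, 3, 224, 224)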
| 100 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
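# To run this module alone (sketch; the path assumes the standard transformers
# repository layout):
#
#     python -m pytest tests/models/albert/test_modeling_albert.py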
| 100 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 77 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, spaces_between_special_tokens: bool = True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1]
def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
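    # A quick shape sketch for the three methods above (hypothetical ids): with
    # token_ids_0=[5, 6] and token_ids_1=[7],
    #   build_inputs_with_special_tokens      -> [cls_id, 5, 6, sep_id, 7, sep_id]
    #   get_special_tokens_mask               -> [1, 0, 0, 1, 0, 1]
    #   create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 1, 1]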
| 222 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = 'gpt_neox_japanese'

    def __init__(self, vocab_size=32_000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10_000, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31_996, eos_token_id=31_999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
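# Usage sketch: the defaults above mirror abeja/gpt-neox-japanese-2.7b, and keyword
# arguments override individual fields (the sizes below are made up for illustration):
# config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=16)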
| 355 | """simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    '''simple docstring'''
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    '''simple docstring'''
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    '''simple docstring'''
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
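# Shape sketch: for hidden size H, the fused `attn.qkv.weight` tensor is (3H, H); the three
# slices above peel off the (H, H) query, key and value projection matrices, in that order.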
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    '''simple docstring'''
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
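# Example invocation (a sketch -- the script filename is assumed; the flags are the ones defined above):
# python convert_trocr_checkpoint.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#     --pytorch_dump_folder_path ./trocr-base-handwritten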
| 32 | 0 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # invert the mapping for decryption
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
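# Round-trip sketch:
# >>> cipher_map = create_cipher_map("Goodbye!!")
# >>> encipher("Hello World!!", cipher_map)
# 'CYJJM VMQJB!!'
# >>> decipher("CYJJM VMQJB!!", cipher_map)
# 'HELLO WORLD!!'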
| 111 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) | 135 | 0 |
"""simple docstring"""
from torch import nn
def get_activation(act_fn):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
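# Usage sketch:
# act = get_activation("silu")   # returns nn.SiLU()
# out = act(torch.randn(2, 4))   # assumes `import torch`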
| 350 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    '''simple docstring'''

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
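# Minimal end-to-end sketch of the pipeline exercised above (downloads weights, needs a GPU):
# pipe = StableDiffusionUpscalePipeline.from_pretrained(
#     "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
# ).to("cuda")
# upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]
# # the output resolution is 4x the input, as the shape assertions above check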
| 341 | 0 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) | 255 |
| 255 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
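# For intuition, a classic example (sketch): all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
# returns [["purp", "le"], ["p", "ur", "p", "le"]] -- every way to assemble the target from the bank.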
| 255 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def UpperCamelCase__ ( self) -> Any:
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
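# The save/from_config round-trips exercised above mirror how users hot-swap schedulers in a
# pipeline, e.g. (sketch): pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)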
| 361 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)


class NERTransformer(BaseTransformer):
    '''simple docstring'''

    mode = "token-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def prepare_data(self):
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)

    def validation_step(self, batch, batch_nb) -> Dict:
        """Compute validation"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don't use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs):
        # when stable
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        # updating to test_epoch_end instead of deprecated test_end
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        # Add NER specific options
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)")
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
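# Example launch (a sketch -- --do_predict is defined via the generic args above; the other
# flag names here are assumptions about what lightning_base/BaseTransformer register):
# python run_ner.py --data_dir ./conll03 --task_type NER --model_name_or_path bert-base-cased \
#     --output_dir ./ner-out --do_train --do_predict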
| 105 | 0 |
'''simple docstring'''
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    '''simple docstring'''
    # repeat the key cyclically until it is as long as the message
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    '''simple docstring'''
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    '''simple docstring'''
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    '''simple docstring'''
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
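# Worked example (sketch of what main() prints for the values above):
# generate_key("THE GERMAN ATTACK", "SECRET")            -> 'SECRETSECRETSECRE'
# cipher_text("THE GERMAN ATTACK", "SECRETSECRETSECRE")  -> 'BDC PAYUWL JPAIYI'
# original_text("BDC PAYUWL JPAIYI", "SECRETSECRETSECRE") -> 'THE GERMAN ATTACK'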
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 120 |
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__A : Optional[Any] = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """simple docstring"""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(df: "pyspark.sql.DataFrame", partition_order: List[int], ):
    '''simple docstring'''
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn


class SparkExamplesIterable(_BaseExamplesIterable):
    """simple docstring"""

    def __init__(self, df: "pyspark.sql.DataFrame", partition_order=None, ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()

            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def __lowercase ( self : Any , lowerCamelCase : "datasets.SplitGenerator" , lowerCamelCase : str = "arrow" , lowerCamelCase : Optional[Union[str, int]] = None , lowerCamelCase : Optional[int] = None , **lowerCamelCase : List[str] , ) -> Optional[int]:
self._validate_cache_dir()
lowerCAmelCase_ : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowerCamelCase )
lowerCAmelCase_ : Optional[int] = not is_remote_filesystem(self._fs )
lowerCAmelCase_ : Dict = os.path.join if is_local else posixpath.join
lowerCAmelCase_ : int = """-TTTTT-SSSSS-of-NNNNN"""
lowerCAmelCase_ : Any = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}'
lowerCAmelCase_ : List[str] = path_join(self._output_dir , lowerCamelCase )
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Dict = []
for task_id, content in self._prepare_split_single(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
(
(
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
), (
lowerCAmelCase_
),
) : str = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowerCamelCase )
lowerCAmelCase_ : Union[str, Any] = total_num_examples
lowerCAmelCase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(F'Renaming {total_shards} shards.' )
if total_shards > 1:
lowerCAmelCase_ : Optional[int] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCAmelCase_ : List[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : int , ):
rename(
lowerCamelCase , fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace("""TTTTT-SSSSS""" , F'{global_shard_id:05d}' ).replace("""NNNNN""" , F'{total_shards:05d}' ) , )
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : str = 0
for i in range(len(lowerCamelCase ) ):
lowerCAmelCase_, lowerCAmelCase_ : Tuple = task_id_and_num_shards[i]
for shard_id in range(lowerCamelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowerCamelCase , len(lowerCamelCase ) ).map(lambda lowerCamelCase : _rename_shard(*lowerCamelCase ) ).collect()
else:
# don't use any pattern
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : str = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , F'{shard_id:05d}' ).replace("""TTTTT""" , F'{task_id:05d}' ) , fpath.replace(lowerCamelCase , """""" ) , )
def __lowercase ( self : Dict , lowerCamelCase : "datasets.SplitGenerator" , ) -> SparkExamplesIterable:
return SparkExamplesIterable(self.df )
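This builder is what backs `Dataset.from_spark` in the `datasets` library; a minimal usage sketch (DataFrame contents illustrative) looks like this:

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([(i, i * i) for i in range(100)], ["x", "x_squared"])
ds = Dataset.from_spark(df)  # materializes the DataFrame as an Arrow-backed Dataset
print(ds[0])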
| 120 | 1 |
"""simple docstring"""
import torch
from torch import nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=1 , _lowerCamelCase=False ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ : List[str] = n_token
UpperCAmelCase__ : Union[str, Any] = d_embed
UpperCAmelCase__ : Dict = d_proj
UpperCAmelCase__ : List[str] = cutoffs + [n_token]
UpperCAmelCase__ : List[Any] = [0] + self.cutoffs
UpperCAmelCase__ : Tuple = div_val
UpperCAmelCase__ : Union[str, Any] = self.cutoffs[0]
UpperCAmelCase__ : str = len(self.cutoffs ) - 1
UpperCAmelCase__ : List[Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCAmelCase__ : Dict = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCAmelCase__ : List[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCAmelCase__ : Optional[int] = nn.ModuleList()
UpperCAmelCase__ : List[str] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase ) ) )
else:
self.out_projs.append(_lowerCamelCase )
self.out_layers.append(nn.Linear(_lowerCamelCase , _lowerCamelCase ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(_lowerCamelCase , _lowerCamelCase ) ) )
self.out_layers.append(nn.Linear(_lowerCamelCase , r_idx - l_idx ) )
UpperCAmelCase__ : int = keep_order
def _a (self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
if proj is None:
UpperCAmelCase__ : int = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase__ : List[str] = nn.functional.linear(_lowerCamelCase , proj.t().contiguous() )
UpperCAmelCase__ : Dict = nn.functional.linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _a (self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase__ : Optional[int] = hidden[..., :-1, :].contiguous()
UpperCAmelCase__ : Union[str, Any] = labels[..., 1:].contiguous()
UpperCAmelCase__ : int = hidden.view(-1 , hidden.size(-1 ) )
UpperCAmelCase__ : Dict = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
UpperCAmelCase__ : Any = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCAmelCase__ : List[str] = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCAmelCase__ : Union[str, Any] = labels != -100
UpperCAmelCase__ : Dict = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase__ : Dict = (
-nn.functional.log_softmax(_lowerCamelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCAmelCase__ : int = nn.functional.log_softmax(_lowerCamelCase , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase__ : str = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase__ : Union[str, Any] = self.out_layers[i].weight
UpperCAmelCase__ : Dict = self.out_layers[i].bias
if i == 0:
UpperCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase__ : Dict = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_lowerCamelCase )
biases.append(_lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase__ : Dict = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : List[str] = nn.functional.log_softmax(_lowerCamelCase , dim=1 )
if labels is None:
UpperCAmelCase__ : str = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCAmelCase__ : Optional[int] = torch.zeros_like(_lowerCamelCase , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(_lowerCamelCase ) - 1 ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase__ : List[str] = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase__ : Dict = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase__ : List[Any] = labels.index_select(0 , _lowerCamelCase ) - l_idx
UpperCAmelCase__ : int = head_logprob.index_select(0 , _lowerCamelCase )
UpperCAmelCase__ : Tuple = hidden.index_select(0 , _lowerCamelCase )
else:
UpperCAmelCase__ : List[str] = hidden
if i == 0:
if labels is not None:
UpperCAmelCase__ : Optional[int] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase__ : Optional[Any] = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = nn.functional.log_softmax(_lowerCamelCase , dim=1 )
UpperCAmelCase__ : List[str] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase__ : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase__ : Optional[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase__ : List[str] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , _lowerCamelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _a (self , _lowerCamelCase ):
"""simple docstring"""
if self.n_clusters == 0:
UpperCAmelCase__ : Optional[Any] = self._compute_logit(_lowerCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(_lowerCamelCase , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase__ : Optional[Any] = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase__ : Any = self.out_layers[i].weight
UpperCAmelCase__ : Tuple = self.out_layers[i].bias
if i == 0:
UpperCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase__ : Optional[int] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(_lowerCamelCase )
biases.append(_lowerCamelCase )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase__ : str = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCAmelCase__ : List[Any] = nn.functional.log_softmax(_lowerCamelCase , dim=1 )
UpperCAmelCase__ : Optional[Any] = [0] + self.cutoffs
for i in range(len(_lowerCamelCase ) - 1 ):
UpperCAmelCase__ , UpperCAmelCase__ : Dict = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase__ : Any = self._compute_logit(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Tuple = nn.functional.log_softmax(_lowerCamelCase , dim=1 )
UpperCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
UpperCAmelCase__ : Dict = logprob_i
return out
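For a quick shape sanity check, PyTorch's built-in adaptive softmax implements the same cutoff/cluster idea; this standalone sketch is independent of the class above:

import torch
from torch import nn

asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=128, n_classes=10_000, cutoffs=[1_000, 4_000], div_value=2.0)
hidden = torch.randn(32, 128)              # one hidden vector per position
targets = torch.randint(0, 10_000, (32,))  # target token ids
result = asm(hidden, targets)
print(result.output.shape, result.loss)    # per-sample target log-probs and mean NLL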
| 166 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run the doctests found in the files of `directory`, filtered by (n_)identifier."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, (list, tuple)):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
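A self-contained illustration of the `doctest.DocTestSuite` mechanism the class above relies on — docstring examples are collected into a unittest suite and executed:

import doctest
import sys
import unittest


def add(a: int, b: int) -> int:
    """
    >>> add(2, 3)
    5
    """
    return a + b


suite = doctest.DocTestSuite(sys.modules[__name__])
result = unittest.TextTestRunner().run(suite)
assert not result.failures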
| 166 | 1 |
"""Dual-transformer block that mixes the outputs of two Transformer2D models."""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class DualTransformer2DModel(nn.Module):
    """Combines two `TransformeraDModel`s and mixes their outputs for dual-condition inference."""

    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states)
| 145 |
"""Deprecated `SageMakerTrainer` shim kept for backwards compatibility."""
import warnings
from ..trainer import Trainer
from ..utils import logging
__a = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 145 | 1
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {str(digit): digit**5 for digit in range(10)}
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_UpperCamelCase ) )
def lowerCAmelCase__ ( ) -> int:
"""simple docstring"""
return sum(
number
for number in range(1_0_0_0 , 1_0_0_0_0_0_0 )
if number == digits_fifth_powers_sum(_UpperCamelCase ) )
if __name__ == "__main__":
print(solution())
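The 1,000,000 search bound is safe because a number with seven or more digits can never equal its digit-fifth-power sum: seven digits give at most 7 * 9**5 = 413,343, which is below 10**6. A quick check against the known Project Euler 30 answer:

assert 7 * 9**5 < 10**6  # no 7-digit (or longer) number can qualify
assert solution() == 443_839  # known result for Project Euler problem 30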
| 149 | """simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
SCREAMING_SNAKE_CASE__ = "true"
def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict=8_2 , _UpperCamelCase : int=1_6 ) -> str:
"""simple docstring"""
set_seed(4_2 )
snake_case = RegressionModel()
snake_case = deepcopy(_UpperCamelCase )
snake_case = RegressionDataset(length=_UpperCamelCase )
snake_case = DataLoader(_UpperCamelCase , batch_size=_UpperCamelCase )
model.to(accelerator.device )
snake_case ,snake_case = accelerator.prepare(_UpperCamelCase , _UpperCamelCase )
return model, ddp_model, dataloader
def lowerCAmelCase__ ( _UpperCamelCase : Accelerator , _UpperCamelCase : Optional[Any]=False ) -> List[str]:
"""simple docstring"""
snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
snake_case = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(_UpperCamelCase : Optional[Any] ):
snake_case = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_UpperCamelCase , max_length=_UpperCamelCase )
return outputs
with accelerator.main_process_first():
snake_case = dataset.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , )
snake_case = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCamelCase : Optional[Any] ):
if use_longest:
return tokenizer.pad(_UpperCamelCase , padding='longest' , return_tensors='pt' )
return tokenizer.pad(_UpperCamelCase , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=1_6 )
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Any ) -> List[Any]:
"""simple docstring"""
snake_case = Accelerator(dispatch_batches=_UpperCamelCase , split_batches=_UpperCamelCase )
snake_case = get_dataloader(_UpperCamelCase , not dispatch_batches )
snake_case = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=_UpperCamelCase )
snake_case ,snake_case = accelerator.prepare(_UpperCamelCase , _UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) -> Dict:
"""simple docstring"""
snake_case = []
for batch in dataloader:
snake_case ,snake_case = batch.values()
with torch.no_grad():
snake_case = model(_UpperCamelCase )
snake_case ,snake_case = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
snake_case ,snake_case = [], []
for logit, targ in logits_and_targets:
logits.append(_UpperCamelCase )
targs.append(_UpperCamelCase )
snake_case ,snake_case = torch.cat(_UpperCamelCase ), torch.cat(_UpperCamelCase )
return logits, targs
def lowerCAmelCase__ ( _UpperCamelCase : Accelerator , _UpperCamelCase : Tuple=8_2 , _UpperCamelCase : Optional[Any]=False , _UpperCamelCase : int=False , _UpperCamelCase : List[str]=1_6 ) -> Optional[Any]:
"""simple docstring"""
snake_case ,snake_case ,snake_case = get_basic_setup(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
snake_case ,snake_case = generate_predictions(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert (
len(_UpperCamelCase ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_UpperCamelCase )}"""
def lowerCAmelCase__ ( _UpperCamelCase : bool = False , _UpperCamelCase : bool = False ) -> Tuple:
"""simple docstring"""
snake_case = evaluate.load('glue' , 'mrpc' )
snake_case ,snake_case = get_mrpc_setup(_UpperCamelCase , _UpperCamelCase )
# First do baseline
snake_case ,snake_case ,snake_case = setup['no']
model.to(_UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(_UpperCamelCase )
with torch.inference_mode():
snake_case = model(**_UpperCamelCase )
snake_case = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=_UpperCamelCase , references=batch['labels'] )
snake_case = metric.compute()
# Then do distributed
snake_case ,snake_case ,snake_case = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
snake_case = model(**_UpperCamelCase )
snake_case = outputs.logits.argmax(dim=-1 )
snake_case = batch['labels']
snake_case ,snake_case = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=_UpperCamelCase , references=_UpperCamelCase )
snake_case = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def lowerCAmelCase__ ( ) -> Tuple:
"""simple docstring"""
snake_case = Accelerator(split_batches=_UpperCamelCase , dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(_UpperCamelCase , _UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
snake_case = Accelerator(split_batches=_UpperCamelCase , dispatch_batches=_UpperCamelCase )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(_UpperCamelCase , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
snake_case = Accelerator()
test_torch_metrics(_UpperCamelCase , 5_1_2 )
accelerator.state._reset_state()
def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> str:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
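The core pattern exercised above, reduced to a minimal sketch: `gather_for_metrics` gathers per-process tensors and drops the duplicated samples that padding the last batch across processes would otherwise introduce (assumes `RegressionDataset` yields {"x", "y"} dicts and `RegressionModel` maps x to a*x + b, as in accelerate's test utilities).

import torch
from torch.utils.data import DataLoader
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel

accelerator = Accelerator()
model, dataloader = accelerator.prepare(RegressionModel(), DataLoader(RegressionDataset(length=99), batch_size=16))
preds = []
for batch in dataloader:
    with torch.no_grad():
        logits = model(batch["x"])
    logits, _ = accelerator.gather_for_metrics((logits, batch["y"]))
    preds.append(logits)
assert torch.cat(preds).shape[0] == 99  # exactly the dataset length, no padded tail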
| 149 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
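A toy illustration of the lazy-import idea used above — attributes resolve to objects in other modules on first access. The `LazyModule` here is illustrative, not transformers' actual `_LazyModule` API:

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to objects in other modules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the module that provides it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


lazy = LazyModule("lazy_std", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.dumps({"a": 1}), lazy.sqrt(2.0))  # json and math are imported only here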
| 33 |
"""simple docstring"""
def lowercase ( __snake_case : Optional[int] ):
lowercase_ : int = 0
lowercase_ : Optional[Any] = len(__snake_case )
for i in range(n - 1 ):
for j in range(i + 1 , __snake_case ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def lowercase ( __snake_case : str ):
if len(__snake_case ) <= 1:
return arr, 0
lowercase_ : Optional[Any] = len(__snake_case ) // 2
lowercase_ : List[Any] = arr[0:mid]
lowercase_ : Union[str, Any] = arr[mid:]
lowercase_ , lowercase_ : Tuple = count_inversions_recursive(__snake_case )
lowercase_ , lowercase_ : List[Any] = count_inversions_recursive(__snake_case )
lowercase_ , lowercase_ : List[Any] = _count_cross_inversions(__snake_case , __snake_case )
lowercase_ : List[Any] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def lowercase ( __snake_case : str , __snake_case : Optional[int] ):
lowercase_ : Optional[Any] = []
lowercase_ : Any = 0
while i < len(__snake_case ) and j < len(__snake_case ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__snake_case ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__snake_case ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def lowercase ( ):
lowercase_ : Union[str, Any] = [1_0, 2, 1, 5, 5, 2, 1_1]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowercase_ : int = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , __snake_case )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowercase_ : Dict = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : Dict = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __snake_case )
# an empty list should also have zero inversions
lowercase_ : List[Any] = []
lowercase_ : Any = count_inversions_bf(__snake_case )
lowercase_ , lowercase_ : List[str] = count_inversions_recursive(__snake_case )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , __snake_case )
if __name__ == "__main__":
main()
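A randomized cross-check of the O(n log n) merge-based count against the O(n^2) brute force, using only the functions defined above:

import random

for _ in range(100):
    arr = [random.randint(0, 50) for _ in range(random.randint(0, 30))]
    _, fast = count_inversions_recursive(arr)
    assert fast == count_inversions_bf(arr)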
| 33 | 1 |
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if both `number` and `number + 2` are prime, else -1.

    >>> twin_prime(3)
    5
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
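Expected behavior of `twin_prime` above, assuming `is_prime` from `maths.prime_check` is a standard primality test (as in TheAlgorithms repository):

assert twin_prime(3) == 5   # (3, 5) are twin primes
assert twin_prime(5) == 7   # (5, 7) are twin primes
assert twin_prime(8) == -1  # 8 is not prime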
| 113 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A_ ( unittest.TestCase ):
@slow
def lowerCAmelCase ( self : List[str]):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = TFAutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Optional[int]):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : str = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = AutoModelForPreTraining.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Optional[int]):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = TFAutoModelForCausalLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Any = AutoModelForCausalLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : List[Any]):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[str] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = AutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Tuple):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Dict = TFAutoModelForMaskedLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = AutoModelForMaskedLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = AutoModelForMaskedLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : str):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Dict = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
__lowerCamelCase , __lowerCamelCase : List[str] = AutoModelForSeqaSeqLM.from_pretrained(
SCREAMING_SNAKE_CASE__ ,output_loading_info=SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Optional[Any]):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = TFAutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
@slow
def lowerCAmelCase ( self : Any):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = TFAutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = AutoModelForQuestionAnswering.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int):
__lowerCamelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__) ,1_4_4_1_0)
__lowerCamelCase : Union[str, Any] = AutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__) ,1_4_4_1_0)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : str = TFAutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_pt=SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__) ,1_4_4_1_0)
__lowerCamelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(SCREAMING_SNAKE_CASE__ ,from_tf=SCREAMING_SNAKE_CASE__)
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual(model.num_parameters() ,1_4_4_1_0)
self.assertEqual(model.num_parameters(only_trainable=SCREAMING_SNAKE_CASE__) ,1_4_4_1_0)
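The cross-framework round trip these tests exercise, reduced to its essence (checkpoint id illustrative; the hub repo must provide weights for the source framework):

from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # load PyTorch weights into TF
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # load TF weights into PyTorch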
| 113 | 1 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Union[str, Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
a :Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Any = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
a :List[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
# pass variant but use the non-variant filenames
a :List[str] = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
a :Optional[int] = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a :List[str] = '''fp16'''
self.assertFalse(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
a :Any = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
# pass variant but use the non-variant filenames
a :Optional[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
a :Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
a :Optional[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(_lowerCamelCase , variant=_lowerCamelCase ) )
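A rough standalone sketch of the rule these tests encode — every ".bin" weight needs a ".safetensors" sibling, optionally with a ".{variant}" infix. This is an illustration, not diffusers' actual implementation:

def is_safetensors_compatible_sketch(filenames, variant=None):
    infix = f".{variant}" if variant else ""
    bins = {f for f in filenames if f.endswith(f"{infix}.bin")}
    safes = {f for f in filenames if f.endswith(f"{infix}.safetensors")}
    # each .bin must have a .safetensors counterpart with the same stem
    expected = {b[: -len(".bin")] + ".safetensors" for b in bins}
    return expected <= safes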
| 94 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
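Cross-checking both implementations against the standard library:

import math
import random

for _ in range(1000):
    a, b = random.randint(0, 10**6), random.randint(0, 10**6)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)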
| 94 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''')

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = '''lower newer'''

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['''qformer_''' + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''],
        )
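    # Note (added): InstructBlipProcessor simply mirrors its tokenizer and image
    # processor and prefixes the Q-Former tokenizer's outputs with "qformer_",
    # which is what the key assertions above verify.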
| 255 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + """ """ if standard_warn else """"""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
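

# Hedged usage sketch (the argument names below are illustrative, not from this
# module):
#
#     def load(**kwargs):
#         scale = deprecate("scale", "0.30.0", "Use `scaling_factor` instead.", take_from=kwargs)
#
# This pops `scale` from `kwargs`, emits a FutureWarning, and returns its value;
# any leftover key in `take_from` triggers the TypeError above.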
| 255 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
    @property
    def has_state(self) -> bool:
        return True
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample
    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self) -> int:
        return self.config.num_train_timesteps
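    # For reference (added): step() implements Eqs. (6)-(7) of the DDPM paper
    # (https://arxiv.org/pdf/2006.11239.pdf),
    #
    #   mu_t(x_t, x_0) = sqrt(alphabar_{t-1}) * beta_t / (1 - alphabar_t) * x_0
    #                  + sqrt(alpha_t) * (1 - alphabar_{t-1}) / (1 - alphabar_t) * x_t
    #   beta_tilde_t   = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t
    #
    # matching `pred_original_sample_coeff`, `current_sample_coeff` and
    # `_get_variance` above.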
| 68 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",), up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int, ...] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 256, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18_215, norm_type: str = "group") -> None:
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == """spatial""" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
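

# Minimal round-trip sketch (added for illustration; the relative imports above
# mean this must run inside the diffusers package, and the shapes are assumptions):
#
#     model = VQModel()
#     x = torch.randn(1, 3, 32, 32)
#     out = model(x).sample  # encode -> quantize -> decode
#     assert out.shape == x.shape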
| 46 | 0 |
import random
def _partition(data: list, pivot) -> tuple[list, list, list]:
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the `index`-th smallest element of `items` in expected O(n) time."""
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
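

# Added examples: quick_select returns the k-th smallest element, so the middle
# index gives a median for odd-length lists.
if __name__ == "__main__":
    items = [7, 1, 5, 3, 9]
    assert quick_select(items, 0) == 1
    assert quick_select(items, len(items) // 2) == 5
    assert quick_select(items, 99) is None  # out-of-range index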
| 350 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of `input_list`, sorted in ascending order."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
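

# Added worked examples: every value tied for the highest count is returned.
if __name__ == "__main__":
    assert mode([2, 3, 4, 5, 3, 4, 2]) == [2, 3, 4]
    assert mode([]) == []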
| 141 | 0 |
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to an integer."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-form roman numeral for `num`."""
    numerals = ""

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        num_roman = generate_roman_numerals(num)
        savings += len(original) - len(num_roman)

    return savings


if __name__ == "__main__":
    print(F'''{solution() = }''')
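

# Added round-trip check: re-encoding a parsed numeral in minimal form can only
# shorten it, which is exactly the saving that `solution` accumulates.
if __name__ == "__main__":
    assert parse_roman_numerals("XLIX") == 49
    assert generate_roman_numerals(49) == "XLIX"
    assert generate_roman_numerals(parse_roman_numerals("IIIIIIIII")) == "IX"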
| 42 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
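# Note (added): `_LazyModule` defers the imports declared in `_import_structure`
# until an attribute is first accessed, e.g.:
#
#     from transformers.models.pix2struct import Pix2StructProcessor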
| 42 | 1 |
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ['''a''', '''b''', '''c''']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ['''c'''])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(['''a''', '''c'''], None, stage_names)
        self.assertEqual(out_features, ['''a''', '''c'''])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ['''a''', '''c'''])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ['''a''', '''c'''])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(('''a''', '''b'''), (0, 1), ['''a''', '''b'''])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0, 1), ['''a'''])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ['''a''', '''b'''])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ['''a'''])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0,), ['''a''', '''b''', '''c'''])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''a''', '''b'''], (0, 2), ['''a''', '''b''', '''c'''])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(['''b''', '''a'''], (0, 1), ['''a''', '''b'''])

        # Check passes with valid inputs
        verify_out_features_out_indices(['''a''', '''b''', '''d'''], (0, 1, -1), ['''a''', '''b''', '''c''', '''d'''])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ['''a''', '''b''', '''c''']
        backbone._out_features = ['''a''', '''c''']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ['''a''', '''c'''])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ['''a''', '''b''']
        self.assertEqual(backbone.out_features, ['''a''', '''b'''])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ['''a''', '''c'''])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 239 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""

    # Random weight
    weight = float(2 * (random.randint(1, 1_00)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 1_00
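

# Note (added for clarity): with deriv=True, `value` is assumed to already be a
# sigmoid output y = sigmoid(x), since sigmoid'(x) = y * (1 - y); that is how
# `layer_1_delta` is derived above.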
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ = int(input("Expected value: "))
lowerCamelCase_ = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations)) | 239 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase : Tuple =logging.get_logger(__name__)
def UpperCAmelCase_ ( __lowerCamelCase : int ):
lowercase_ :Union[str, Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase_ :Tuple = 10_24
lowercase_ :Optional[Any] = 40_96
lowercase_ :Optional[Any] = 24
lowercase_ :Optional[int] = 16
lowercase_ :List[Any] = [5, 11, 17, 23]
lowercase_ :Union[str, Any] = [2_56, 5_12, 10_24, 10_24]
lowercase_ :Dict = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
lowercase_ :Optional[int] = True
lowercase_ :Any = 1_50
lowercase_ :str = "huggingface/label-files"
lowercase_ :Union[str, Any] = "ade20k-id2label.json"
lowercase_ :Tuple = json.load(open(cached_download(hf_hub_url(__lowerCamelCase ,__lowerCamelCase ,repo_type="dataset" ) ) ,"r" ) )
lowercase_ :int = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
lowercase_ :int = idalabel
lowercase_ :str = {v: k for k, v in idalabel.items()}
lowercase_ :str = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def UpperCAmelCase_ ( __lowerCamelCase : int ):
lowercase_ :Union[str, Any] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase ,__lowerCamelCase )
def UpperCAmelCase_ ( __lowerCamelCase : List[str] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase_ :List[Any] = name.replace("pretrained.model" ,"dpt.encoder" )
if "pretrained.model" in name:
lowercase_ :Dict = name.replace("pretrained.model" ,"dpt.embeddings" )
if "patch_embed" in name:
lowercase_ :List[Any] = name.replace("patch_embed" ,"patch_embeddings" )
if "pos_embed" in name:
lowercase_ :Dict = name.replace("pos_embed" ,"position_embeddings" )
if "attn.proj" in name:
lowercase_ :Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" )
if "proj" in name and "project" not in name:
lowercase_ :int = name.replace("proj" ,"projection" )
if "blocks" in name:
lowercase_ :Dict = name.replace("blocks" ,"layer" )
if "mlp.fc1" in name:
lowercase_ :str = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
lowercase_ :Dict = name.replace("mlp.fc2" ,"output.dense" )
if "norm1" in name:
lowercase_ :str = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
lowercase_ :List[Any] = name.replace("norm2" ,"layernorm_after" )
if "scratch.output_conv" in name:
lowercase_ :Dict = name.replace("scratch.output_conv" ,"head" )
if "scratch" in name:
lowercase_ :str = name.replace("scratch" ,"neck" )
if "layer1_rn" in name:
lowercase_ :Dict = name.replace("layer1_rn" ,"convs.0" )
if "layer2_rn" in name:
lowercase_ :Optional[Any] = name.replace("layer2_rn" ,"convs.1" )
if "layer3_rn" in name:
lowercase_ :int = name.replace("layer3_rn" ,"convs.2" )
if "layer4_rn" in name:
lowercase_ :List[Any] = name.replace("layer4_rn" ,"convs.3" )
if "refinenet" in name:
lowercase_ :List[str] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase_ :Optional[int] = name.replace(F'refinenet{layer_idx}' ,F'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
lowercase_ :str = name.replace("out_conv" ,"projection" )
if "resConfUnit1" in name:
lowercase_ :Any = name.replace("resConfUnit1" ,"residual_layer1" )
if "resConfUnit2" in name:
lowercase_ :List[str] = name.replace("resConfUnit2" ,"residual_layer2" )
if "conv1" in name:
lowercase_ :str = name.replace("conv1" ,"convolution1" )
if "conv2" in name:
lowercase_ :Optional[int] = name.replace("conv2" ,"convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase_ :Tuple = name.replace("pretrained.act_postprocess1.0.project.0" ,"neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase_ :Optional[int] = name.replace("pretrained.act_postprocess2.0.project.0" ,"neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase_ :Tuple = name.replace("pretrained.act_postprocess3.0.project.0" ,"neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase_ :List[Any] = name.replace("pretrained.act_postprocess4.0.project.0" ,"neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase_ :str = name.replace("pretrained.act_postprocess1.3" ,"neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
lowercase_ :List[Any] = name.replace("pretrained.act_postprocess1.4" ,"neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
lowercase_ :Optional[int] = name.replace("pretrained.act_postprocess2.3" ,"neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
lowercase_ :Tuple = name.replace("pretrained.act_postprocess2.4" ,"neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
lowercase_ :List[str] = name.replace("pretrained.act_postprocess3.3" ,"neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
lowercase_ :Tuple = name.replace("pretrained.act_postprocess4.3" ,"neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
lowercase_ :List[str] = name.replace("pretrained.act_postprocess4.4" ,"neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
lowercase_ :List[Any] = name.replace("pretrained" ,"dpt" )
if "bn" in name:
lowercase_ :Tuple = name.replace("bn" ,"batch_norm" )
if "head" in name:
lowercase_ :Any = name.replace("head" ,"head.head" )
if "encoder.norm" in name:
lowercase_ :str = name.replace("encoder.norm" ,"layernorm" )
if "auxlayer" in name:
lowercase_ :Dict = name.replace("auxlayer" ,"auxiliary_head.head" )
return name
def UpperCAmelCase_ ( __lowerCamelCase : Any ,__lowerCamelCase : Any ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ :Optional[Any] = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
lowercase_ :str = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ :Optional[Any] = in_proj_weight[: config.hidden_size, :]
lowercase_ :Optional[Any] = in_proj_bias[: config.hidden_size]
lowercase_ :Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ :Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ :Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ :int = in_proj_bias[-config.hidden_size :]
def UpperCAmelCase_ ( ):
lowercase_ :List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase_ :List[str] = Image.open(requests.get(__lowerCamelCase ,stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( __lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int ):
lowercase_ , lowercase_ :Optional[int] = get_dpt_config(__lowerCamelCase )
# load original state_dict from URL
lowercase_ :Optional[int] = torch.hub.load_state_dict_from_url(__lowerCamelCase ,map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
lowercase_ :Optional[int] = state_dict.pop(__lowerCamelCase )
lowercase_ :Optional[int] = val
# read in qkv matrices
read_in_q_k_v(__lowerCamelCase ,__lowerCamelCase )
# load HuggingFace model
lowercase_ :int = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Check outputs on an image
lowercase_ :int = 4_80 if "ade" in checkpoint_url else 3_84
lowercase_ :Any = DPTImageProcessor(size=__lowerCamelCase )
lowercase_ :Union[str, Any] = prepare_img()
lowercase_ :Tuple = image_processor(__lowerCamelCase ,return_tensors="pt" )
# forward pass
lowercase_ :Tuple = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
# Assert logits
lowercase_ :List[str] = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]] )
if "ade" in checkpoint_url:
lowercase_ :Dict = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]] )
assert outputs.shape == torch.Size(__lowerCamelCase )
assert (
torch.allclose(outputs[0, 0, :3, :3] ,__lowerCamelCase ,atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] ,__lowerCamelCase )
)
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowerCamelCase )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
print("Pushing model to hub..." )
model.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase ,__lowerCamelCase ) ,organization="nielsr" ,commit_message="Add model" ,use_temp_dir=__lowerCamelCase ,)
image_processor.push_to_hub(
repo_path_or_name=Path(__lowerCamelCase ,__lowerCamelCase ) ,organization="nielsr" ,commit_message="Add image processor" ,use_temp_dir=__lowerCamelCase ,)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 223 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
    from transformers.models.bert.modeling_flax_bert import (
        FlaxBertForMaskedLM,
        FlaxBertForMultipleChoice,
        FlaxBertForNextSentencePrediction,
        FlaxBertForPreTraining,
        FlaxBertForQuestionAnswering,
        FlaxBertForSequenceClassification,
        FlaxBertForTokenClassification,
        FlaxBertModel,
    )
class FlaxBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='''gelu''', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained('''bert-base-cased''')
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 223 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig(PretrainedConfig):
    model_type = '''mgp-str'''

    def __init__(self, image_size=[32, 128], patch_size=4, num_channels=3, max_token_length=27, num_character_labels=38, num_bpe_labels=50257, num_wordpiece_labels=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, mlp_ratio=4.0, qkv_bias=True, distilled=False, layer_norm_eps=1E-5, drop_rate=0.0, attn_drop_rate=0.0, drop_path_rate=0.0, output_a3_attentions=False, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
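

# Minimal usage sketch (added; the directory name below is illustrative):
#
#     config = MgpstrConfig(max_token_length=27)
#     config.save_pretrained("./mgp-str-base")  # writes config.json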
| 357 |
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@''']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['''#version: 0.2''', '''l à</w>''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])

        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''Tôi là VinAI Research'''
        output_text = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = '''Tôi là VinAI Research'''
        bpe_tokens = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 90 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='''[CLS]''', eos_token='''[SEP]''', unk_token='''<unk>''', sep_token='''[SEP]''', pad_token='''<pad>''', cls_token='''[CLS]''', mask_token='''[MASK]''', sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ''' '''.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('''``''', '''"''').replace("''", '''"''')

        if not self.keep_accents:
            outputs = unicodedata.normalize('''NFKD''', outputs)
            outputs = ''''''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(''',''') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''''''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]

                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ''' '''
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 188 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError('''number of qubits must be a integer.''')
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''')
    if number_of_qubits > 10:
        raise ValueError('''number of qubits too large to simulate(>10).''')

    qr = QuantumRegister(number_of_qubits, '''qr''')
    cr = ClassicalRegister(number_of_qubits, '''cr''')

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''')
    job = execute(quantum_circuit, backend, shots=1_00_00)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
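
# For reference (added): an n-qubit QFT built as above uses n Hadamard gates,
# n * (n - 1) / 2 controlled-phase rotations, and floor(n / 2) final swaps.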
| 188 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, '''schedule.bin''')
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(1_0_0):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w], lr=1E-2, eps=(1E-30, 1E-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False,
        )
        for _ in range(1_0_0_0):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
kwargs , expected_learning_rates = data
scheduler = scheduler_func(self.optimizer , **kwargs )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
lrs_1 = unwrap_schedule(scheduler , self.num_steps )
self.assertListAlmostEqual(
lrs_1 , expected_learning_rates , tol=1E-2 , msg=f"failed for {scheduler_func} in normal scheduler" , )
scheduler = scheduler_func(self.optimizer , **kwargs )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(scheduler ) # wrap to test picklability of the schedule
lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
self.assertListEqual(lrs_1 , lrs_2 , msg=f"failed for {scheduler_func} in save and reload" )
class LambdaScheduleWrapper:
def __init__(self : str , fn : Dict ):
self.fn = fn
def __call__(self : str , *args : Union[str, Any] , **kwargs : Tuple ):
return self.fn(*args , **kwargs )
@classmethod
def wrap_scheduler(cls : Optional[Any] , scheduler : Dict ):
scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
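# Minimal usage sketch (assumed names): wrapping a LambdaLR-style scheduler's lambdas makes the
# schedule picklable, so its state_dict round-trips through torch.save/torch.load:
#   scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
#   LambdaScheduleWrapper.wrap_scheduler(scheduler)
#   torch.save(scheduler.state_dict(), "schedule.bin")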
| 90 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
@slow
def test_small_integration_test(self : Tuple ):
model = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' , return_dict=True ).to(torch_device )
tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
input_ids = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
labels = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 90 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_bert'] = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_bert'] = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_bert'] = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
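# Illustrative note: attributes of this module now resolve lazily, e.g.
# `from transformers.models.bert import BertModel` only imports modeling_bert on first access.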
| 25 |
'''simple docstring'''
def gcd(a : int , b : int ):
'''simple docstring'''
while a != 0:
a , b = b % a, a
return b
def mod_inverse(a : int , m : int ):
'''simple docstring'''
if gcd(a , m ) != 1:
msg = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(msg )
u1 , u2 , u3 = 1, 0, a
v1 , v2 , v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
return u1 % m
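# Illustrative usage: when gcd(a, m) == 1, (a * mod_inverse(a, m)) % m == 1,
# e.g. mod_inverse(7, 26) == 15 because (7 * 15) % 26 == 105 % 26 == 1.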
| 158 | 0 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
order : List[Any] = 1
@register_to_config
def __init__( self : Dict , num_train_timesteps : int = 1_0_0_0 , trained_betas : Optional[Union[np.ndarray, List[float]]] = None ) -> Optional[int]:
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(num_train_timesteps )
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
self.pndm_order = 4
# running values
self.ets = []
def set_timesteps( self : Tuple , num_inference_steps : int , device : Union[str, torch.device] = None ) -> None:
self.num_inference_steps = num_inference_steps
steps = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
steps = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
self.betas = torch.tensor(self.config.trained_betas , dtype=torch.float32 )
else:
self.betas = torch.sin(steps * math.pi / 2 ) ** 2
self.alphas = (1.0 - self.betas**2) ** 0.5
timesteps = (torch.atan2(self.betas , self.alphas ) / math.pi * 2)[:-1]
self.timesteps = timesteps.to(device )
self.ets = []
def step( self : List[str] , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , return_dict : bool = True , ) -> Union[SchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
timestep_index = (self.timesteps == timestep).nonzero().item()
prev_timestep_index = timestep_index + 1
ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(ets )
if len(self.ets ) == 1:
ets = self.ets[-1]
elif len(self.ets ) == 2:
ets = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
ets = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
ets = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
prev_sample = self._get_prev_sample(sample , timestep_index , prev_timestep_index , ets )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample )
def scale_model_input( self : Dict , sample : torch.FloatTensor , *args : Optional[Any] , **kwargs : int ) -> torch.FloatTensor:
return sample
def _get_prev_sample( self : Optional[int] , sample : Optional[Any] , timestep_index : Union[str, Any] , prev_timestep_index : str , ets : int ) -> Any:
alpha = self.alphas[timestep_index]
sigma = self.betas[timestep_index]
next_alpha = self.alphas[prev_timestep_index]
next_sigma = self.betas[prev_timestep_index]
pred = (sample - sigma * ets) / max(alpha , 1E-8 )
prev_sample = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : str ) -> Optional[Any]:
return self.config.num_train_timesteps
| 360 |
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict ):
'''simple docstring'''
# 1. convert the layer indices to the HF block/layer structure
keys = list(s_dict.keys() )
for key in keys:
layer_to_block_of_layer = R".*/layers_(\d+)"
new_key = key
if re.match(layer_to_block_of_layer , key ):
new_key = re.sub(R"layers_(\d+)" , R"block/\1/layer" , new_key )
layer_to_block_of_layer = R"(encoder|decoder)\/"
if re.match(layer_to_block_of_layer , key ):
groups = re.match(layer_to_block_of_layer , new_key ).groups()
if groups[0] == "encoder":
new_key = re.sub(R"/mlp/" , R"/1/mlp/" , new_key )
new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/1/layer_norm/" , new_key )
elif groups[0] == "decoder":
new_key = re.sub(R"/mlp/" , R"/2/mlp/" , new_key )
new_key = re.sub(R"/pre_mlp_layer_norm/" , R"/2/layer_norm/" , new_key )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
new_key = new_key.replace(old_key , temp_key )
print(f"""{key} -> {new_key}""" )
s_dict[new_key] = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
num_experts = s_dict[key].shape[0]
expert_weights = s_dict[key]
for idx in range(num_experts ):
s_dict[key.replace("expert/" , f"experts/expert_{idx}/" )] = expert_weights[idx]
print(f"""{key} -> {key.replace('expert/' , f'experts/expert_{idx}/' )}""" )
s_dict.pop(key )
return s_dict
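# Illustrative (assumed) example of the resulting key mapping:
# "encoder/layers_0/attention/query/kernel" -> "encoder/block/0/layer/0/SelfAttention/q/kernel"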
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file , num_experts ):
'''simple docstring'''
import regex as re
with open(gin_file , "r" ) as f:
raw_gin = f.read()
regex_match = re.findall(R"(.*) = ([0-9.]*)" , raw_gin )
args = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
args[GIN_TO_CONFIG_MAPPING[param]] = float(value ) if "." in value else int(value )
activation = re.findall(R"(.*activations) = \(\'(.*)\',\)" , raw_gin )[0]
args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1] )
args["num_experts"] = num_experts
config = SwitchTransformersConfig(**args )
return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path , config_file , gin_file=None , pytorch_dump_path="./" , num_experts=8 ):
'''simple docstring'''
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
if gin_file is not None:
config = convert_gin_to_config(gin_file , num_experts )
else:
config = SwitchTransformersConfig.from_pretrained(config_file )
pt_model = SwitchTransformersForConditionalGeneration(config )
flax_params = flax_params["target"]
flax_params = flatten_dict(flax_params , sep="/" )
flax_params = rename_keys(flax_params )
flax_params = unflatten_dict(flax_params , sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(pt_model , flax_params )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
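# Example invocation (hypothetical script name and paths):
#   python convert_switch_transformers_checkpoint.py \
#       --switch_t5x_checkpoint_path /tmp/switch_base_8 \
#       --gin_file /tmp/switch_base_8/config.gin \
#       --pytorch_dump_folder_path /tmp/switch_pt --num_experts 8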
| 195 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset():
"""simple docstring"""
arr = [randint(-1000 ,1000 ) for i in range(10 )]
r = randint(-5000 ,5000 )
return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr : list[int] , target : int ):
"""simple docstring"""
for triplet in permutations(arr ,3 ):
if sum(triplet ) == target:
return tuple(sorted(triplet ) )
return (0, 0, 0)
def triplet_sum2( arr : list[int] , target : int ):
"""simple docstring"""
arr.sort()
n = len(arr )
for i in range(n - 1 ):
left , right = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
def solution_times():
"""simple docstring"""
setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
test_code1 = "\ntriplet_sum1(*dataset)\n"
test_code2 = "\ntriplet_sum2(*dataset)\n"
times1 = repeat(setup=setup_code ,stmt=test_code1 ,repeat=5 ,number=1_0000 )
times2 = repeat(setup=setup_code ,stmt=test_code2 ,repeat=5 ,number=1_0000 )
return (min(times1 ), min(times2 ))
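# Illustrative check (assumed input): triplet_sum2([1, 2, 3, 4, 5], 9) returns (1, 3, 5),
# the sorted two-pointer scan finding 1 + 3 + 5 == 9; triplet_sum1 finds the same by brute force.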
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(F"The time for naive implementation is {times[0]}.")
print(F"The time for optimized implementation is {times[1]}.") | 312 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__a :Any = logging.get_logger(__name__)
def get_config( model_name : Optional[int] ):
"""simple docstring"""
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type="dataset" ) ,"r" ) )
id2label = {int(k ): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
conv_layer = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
config = BitConfig(
conv_layer=conv_layer ,num_labels=1000 ,id2label=id2label ,label2id=label2id ,)
return config
def rename_key( name : Union[str, Any] ):
"""simple docstring"""
if "stem.conv" in name:
name = name.replace("stem.conv" ,"bit.embedder.convolution" )
if "blocks" in name:
name = name.replace("blocks" ,"layers" )
if "head.fc" in name:
name = name.replace("head.fc" ,"classifier.1" )
if name.startswith("norm" ):
name = "bit." + name
if "bit" not in name and "classifier" not in name:
name = "bit.encoder." + name
return name
def prepare_img():
"""simple docstring"""
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url ,stream=True ).raw )
return im
@torch.no_grad()
def convert_bit_checkpoint( model_name : Any , pytorch_dump_folder_path : Optional[Any] , push_to_hub : Tuple=False ):
"""simple docstring"""
config = get_config(model_name )
# load original model from timm
timm_model = create_model(model_name ,pretrained=True )
timm_model.eval()
# load state_dict of original model
state_dict = timm_model.state_dict()
for key in state_dict.copy().keys():
val = state_dict.pop(key )
state_dict[rename_key(key )] = val.squeeze() if "head" in key else val
# load HuggingFace model
model = BitForImageClassification(config )
model.eval()
model.load_state_dict(state_dict )
# create image processor
transform = create_transform(**resolve_data_config({} ,model=timm_model ) )
timm_transforms = transform.transforms
pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
processor = BitImageProcessor(
do_resize=True ,size={"shortest_edge": timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=True ,crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} ,do_normalize=True ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
image = prepare_img()
timm_pixel_values = transform(image ).unsqueeze(0 )
pixel_values = processor(image ,return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(timm_pixel_values ,pixel_values )
# verify logits
with torch.no_grad():
outputs = model(pixel_values )
logits = outputs.logits
print("Logits:" ,logits[0, :3] )
print("Predicted class:" ,model.config.id2label[logits.argmax(-1 ).item()] )
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits ,outputs.logits ,atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print(f'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(f'''ybelkada/{model_name}''' )
processor.push_to_hub(f'''ybelkada/{model_name}''' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 312 | 1 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests( unittest.TestCase ):
@property
def dummy_uncond_unet(self : Tuple ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
model = UNet2DModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def test_inference(self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
unet = self.dummy_uncond_unet
scheduler = ScoreSdeVeScheduler()
sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
sde_ve.to(torch_device )
sde_ve.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=generator ).images
generator = torch.manual_seed(0 )
image_from_tuple = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=generator , return_dict=False )[
0
]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests( unittest.TestCase ):
def test_inference(self : Tuple ) -> str:
"""simple docstring"""
model_id = "google/ncsnpp-church-256"
model = UNet2DModel.from_pretrained(model_id )
scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
sde_ve.to(torch_device )
sde_ve.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = sde_ve(num_inference_steps=1_0 , output_type="numpy" , generator=generator ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 143 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
PegasusTokenizer = None
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'},
'tokenizer_file': {
'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/pegasus-xsum': 5_1_2,
}
class PegasusTokenizerFast( PreTrainedTokenizerFast ):
vocab_files_names : str = VOCAB_FILES_NAMES
pretrained_vocab_files_map : Any = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class : Union[str, Any] = PegasusTokenizer
model_input_names : Any = ['input_ids', 'attention_mask']
def __init__(self : Optional[int] , vocab_file : List[str]=None , tokenizer_file : Any=None , pad_token : Union[str, Any]="<pad>" , eos_token : List[str]="</s>" , unk_token : Union[str, Any]="<unk>" , mask_token : int="<mask_2>" , mask_token_sent : Optional[Any]="<mask_1>" , additional_special_tokens : Union[str, Any]=None , offset : str=1_0_3 , **kwargs : str , ) -> Optional[Any]:
"""simple docstring"""
self.offset = offset
if additional_special_tokens is not None:
if not isinstance(additional_special_tokens , list ):
raise TypeError(
f"""additional_special_tokens should be of type {type(list )}, but is"""
f""" {type(additional_special_tokens )}""" )
additional_special_tokens_extended = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(additional_special_tokens_extended ) , self.offset - 1 )
]
if len(set(additional_special_tokens_extended ) ) != len(additional_special_tokens_extended ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
additional_special_tokens = additional_special_tokens_extended
else:
additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , pad_token=pad_token , eos_token=eos_token , unk_token=unk_token , mask_token=mask_token , mask_token_sent=mask_token_sent , offset=offset , additional_special_tokens=additional_special_tokens , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def _special_token_mask(self : List[Any] , seq : Tuple ) -> int:
"""simple docstring"""
all_special_ids = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def get_special_tokens_mask(self : Union[str, Any] , token_ids_0 : List , token_ids_1 : Optional[List] = None , already_has_special_tokens : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(token_ids_0 )
elif token_ids_1 is None:
return self._special_token_mask(token_ids_0 ) + [1]
else:
return self._special_token_mask(token_ids_0 + token_ids_1 ) + [1]
def build_inputs_with_special_tokens(self : str , token_ids_0 : List[str] , token_ids_1 : Optional[Any]=None ) -> List[int]:
"""simple docstring"""
if token_ids_1 is None:
return token_ids_0 + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_0 + token_ids_1 + [self.eos_token_id]
def save_vocabulary(self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(save_directory ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
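# Minimal usage sketch (assumed checkpoint name):
#   tok = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   tok("Hello world").input_ids  # ends with eos_token_id, per build_inputs_with_special_tokens above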
| 143 | 1 |
from torch import nn
def get_activation( act_fn ) -> Any:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'Unsupported activation function: {act_fn}' )
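# Illustrative usage: get_activation("gelu") returns nn.GELU(); an unsupported name such as
# "tanh" raises ValueError.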
| 71 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big( n ,prec=1_000 ) -> Optional[Any]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
d = n - 1
exp = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
count = 0
while count < prec:
a = random.randint(2 ,n - 1 )
b = bin_exp_mod(a ,d ,n )
if b != 1:
flag = True
for _ in range(exp ):
if b == n - 1:
flag = False
break
b = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
n = abs(int(input('''Enter bound : ''').strip()))
print('''Here\'s the list of primes:''')
print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
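# Illustrative note: this is the Miller-Rabin probabilistic primality test; each of the `prec`
# random rounds has a false-positive probability of at most 1/4, e.g. is_prime_big(97) -> True.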
| 71 | 1 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome( num : int | str ):
'''simple docstring'''
n = str(num )
return n == n[::-1]
def solution( limit : int = 100_0000 ):
'''simple docstring'''
total = 0
for i in range(1 , limit ):
if is_palindrome(i ) and is_palindrome(bin(i ).split('''b''' )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
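# Worked example: 585 is palindromic in base 10 and in base 2 (0b1001001001),
# so it is one of the terms summed by solution() (Project Euler problem 36).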
| 326 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__A : List[Any] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
__A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 326 | 1 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome( num ) -> bool:
n = str(num )
return n == n[::-1]
def solution( limit = 1_00_00_00 ) -> int:
total = 0
for i in range(1 , limit ):
if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 250 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : Any = (3_2, 3_2)
_UpperCAmelCase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=lowerCAmelCase__ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Union[str, Any] = DDPMScheduler()
_UpperCAmelCase : str = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : List[str] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : int = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : Dict = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : str = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : Dict = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=lowerCAmelCase__ , )[0]
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCAmelCase : Optional[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Optional[Any] = self.dummy_cond_unet_upscale
_UpperCAmelCase : Tuple = DDPMScheduler()
_UpperCAmelCase : Dict = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : str = self.dummy_vae
_UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
_UpperCAmelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : List[str] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : Any = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : int = output.images
assert image.shape[0] == 2
_UpperCAmelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
_UpperCAmelCase : List[Any] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Any = self.dummy_cond_unet_upscale
_UpperCAmelCase : Any = DDPMScheduler()
_UpperCAmelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase : Optional[int] = self.dummy_vae
_UpperCAmelCase : List[Any] = self.dummy_text_encoder
_UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase : Optional[int] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase : Tuple = unet.half()
_UpperCAmelCase : Dict = text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=lowerCAmelCase__ , low_res_scheduler=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , max_noise_level=3_5_0 , )
_UpperCAmelCase : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
_UpperCAmelCase : Dict = "A painting of a squirrel eating a burger"
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCAmelCase : Tuple = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : str = StableDiffusionUpscalePipeline.from_pretrained(lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Union[str, Any] = "a cat sitting on a park bench"
_UpperCAmelCase : str = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCAmelCase : Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase__ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
_UpperCAmelCase : Dict = "a cat sitting on a park bench"
_UpperCAmelCase : Tuple = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , output_type="np" , )
_UpperCAmelCase : str = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase : int = "stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase : Any = StableDiffusionUpscalePipeline.from_pretrained(
lowerCAmelCase__ , torch_dtype=torch.floataa , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase : Tuple = "a cat sitting on a park bench"
_UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=5 , output_type="np" , )
_UpperCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
| 145 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 51 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ ( self : str ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : Union[str, Any] = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : Optional[Any] = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__ : Any = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
UpperCamelCase__ : Union[str, Any] = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : List[Any] = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCamelCase__ : Dict = DDPMScheduler()
UpperCamelCase__ : List[Any] = AudioDiffusionPipeline(vqvae=lowerCamelCase__ , unet=self.dummy_unet , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(42 )
UpperCamelCase__ : List[str] = pipe(generator=lowerCamelCase__ , steps=4 )
UpperCamelCase__ : Union[str, Any] = output.audios[0]
UpperCamelCase__ : Any = output.images[0]
UpperCamelCase__ : Optional[int] = torch.Generator(device=lowerCamelCase__ ).manual_seed(42 )
UpperCamelCase__ : int = pipe(generator=lowerCamelCase__ , steps=4 , return_dict=lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCamelCase__ : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCamelCase__ : Any = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
UpperCamelCase__ : str = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ : List[str] = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCamelCase__ : Optional[Any] = DDIMScheduler()
UpperCamelCase__ : Union[str, Any] = self.dummy_vqvae_and_unet
UpperCamelCase__ : Union[str, Any] = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
UpperCamelCase__ : str = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
UpperCamelCase__ : Optional[int] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCamelCase__ : Optional[int] = torch.Generator(device=lowerCamelCase__ ).manual_seed(42 )
UpperCamelCase__ : Union[str, Any] = pipe(raw_audio=lowerCamelCase__ , generator=lowerCamelCase__ , start_step=5 , steps=10 )
UpperCamelCase__ : int = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCamelCase__ : List[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCamelCase__ : Union[str, Any] = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ : Any = self.dummy_unet_condition
UpperCamelCase__ : str = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCamelCase__ , mel=lowerCamelCase__ , scheduler=lowerCamelCase__ )
UpperCamelCase__ : Any = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
np.random.seed(0 )
UpperCamelCase__ : Union[str, Any] = torch.rand((1, 1, 10) )
UpperCamelCase__ : int = pipe(generator=lowerCamelCase__ , encoding=lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = output.images[0]
UpperCamelCase__ : List[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCamelCase__ : str = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : str ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = torch_device
UpperCamelCase__ : Dict = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
UpperCamelCase__ : Optional[int] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = torch.Generator(device=lowerCamelCase__ ).manual_seed(42 )
UpperCamelCase__ : Optional[Any] = pipe(generator=lowerCamelCase__ )
UpperCamelCase__ : Dict = output.audios[0]
UpperCamelCase__ : Optional[Any] = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCamelCase__ : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
UpperCamelCase__ : List[Any] = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 51 | 1 |
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
a , b = 0, 1
while True:
a , b = b, a + b
yield b
def solution(n = 1000 ) -> int:
answer = 1
gen = fibonacci_generator()
while len(str(next(gen ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
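# Worked example: the first Fibonacci term with 3 digits is 144, the 12th term,
# so solution(3) == 12; Project Euler problem 25 asks for solution(1000).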
| 208 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 208 | 1 |
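For context, the _LazyModule indirection above defers the heavy torch-backed imports until an attribute is first touched. A minimal sketch of that behavior, assuming the standard transformers lazy-import semantics:
import transformers  # the biogpt submodule is not actually loaded yet
config_cls = transformers.BioGptConfig  # first attribute access triggers the real import
print(config_cls.model_type)  # "biogpt"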
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : float = 3.0
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Union[str, Any] ):
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} )
self.assertDictEqual(MockClass(a=2 , b=UpperCamelCase_ ).to_kwargs() , {"""a""": 2, """b""": True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} )
@require_cuda
def lowerCAmelCase__ ( self: str ):
        # Check that custom GradScaler kwargs are applied when mixed precision is enabled.
__lowerCamelCase = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
__lowerCamelCase = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
__lowerCamelCase = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , UpperCamelCase_ )
@require_multi_gpu
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
UpperCAmelCase_ = Accelerator(kwargs_handlers=[ddp_scaler])
UpperCAmelCase_ = torch.nn.Linear(100, 200)
UpperCAmelCase_ = accelerator.prepare(model)
# Check the values changed in kwargs
UpperCAmelCase_ = ''
UpperCAmelCase_ = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 29 |
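The to_kwargs behavior exercised above generalizes to any dataclass inheriting KwargsHandler: only fields that differ from their defaults are returned. A minimal sketch (the MyHandlerKwargs name is made up for illustration):
from dataclasses import dataclass
from accelerate.utils import KwargsHandler

@dataclass
class MyHandlerKwargs(KwargsHandler):
    lr_scale: float = 1.0
    warmup_steps: int = 0

# Only the non-default field shows up, mirroring the MockClass assertions above.
assert MyHandlerKwargs(warmup_steps=10).to_kwargs() == {"warmup_steps": 10}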
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class lowerCamelCase__( unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , config_name=UpperCamelCase_ )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = AutoConfig.from_pretrained("""gpt2""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
__lowerCamelCase = copy.deepcopy(UpperCamelCase_ )
__lowerCamelCase = generation_config.update(**UpperCamelCase_ )
# update_kwargs was not modified (no side effects)
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(UpperCamelCase_ , {"""foo""": """bar"""} )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
__lowerCamelCase = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__lowerCamelCase = GenerationConfig.from_model_config(UpperCamelCase_ )
assert not hasattr(UpperCamelCase_ , """foo""" ) # no new kwargs should be initialized if from config
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , UpperCamelCase_ )
self.assertEqual(default_config.num_beams , 1 )
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , UpperCamelCase_ )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCamelCase_ )
__lowerCamelCase = GenerationConfig.from_pretrained(UpperCamelCase_ , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , UpperCamelCase_ )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class lowerCamelCase__( unittest.TestCase):
@classmethod
def lowerCAmelCase__ ( cls: Optional[Any] ):
__lowerCamelCase = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def lowerCAmelCase__ ( cls: str ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""test-generation-config""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = GenerationConfig(
do_sample=UpperCamelCase_ , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCamelCase_ , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
__lowerCamelCase = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCamelCase_ , getattr(UpperCamelCase_ , UpperCamelCase_ ) )
| 29 | 1 |
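Outside of tests, the usual flow for this class is to build a GenerationConfig once and hand it to generate; a short usage sketch:
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
gen_config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=20)
inputs = tokenizer("Hello", return_tensors="pt")
output = model.generate(**inputs, generation_config=gen_config)
print(tokenizer.decode(output[0], skip_special_tokens=True))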
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowerCAmelCase__ ( ):
'''simple docstring'''
    _a : Any = HfArgumentParser(TensorFlowBenchmarkArguments)
_a : int = parser.parse_args_into_dataclasses()[0]
_a : Tuple = TensorFlowBenchmark(args=UpperCamelCase__ )
try:
_a : Dict = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_a : List[Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
_a : List[str] = """ """.join(str(UpperCamelCase__ ).split(""" """ )[:-1] )
_a : Union[str, Any] = """"""
_a : Dict = eval(str(UpperCamelCase__ ).split(""" """ )[-1] )
_a : Optional[Any] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
_a : Dict = full_error_msg + begin_error_msg + str(UpperCamelCase__ )
raise ValueError(UpperCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 294 |
"""simple docstring"""
import cv2
import numpy as np
class HarrisCorner :
    def __init__( self , k: float , window_size: int ) -> None:
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
    def __str__( self ) -> str:
        return str(self.k)
    def detect( self , img_path: str ) -> tuple[np.ndarray, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h , w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy , dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # local response constant; note it shadows self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
    cv2.imwrite('detect.png', color_img)
| 294 | 1 |
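The per-window response computed above is the standard Harris measure: with I_x, I_y the image gradients accumulated over a window W,
M = \sum_{(u,v) \in W} \begin{pmatrix} I_x^2 & I_x I_y \\ I_x I_y & I_y^2 \end{pmatrix},
\qquad R = \det(M) - k \, (\operatorname{tr} M)^2
Large positive R marks a corner, large negative R an edge, and values near zero a flat region; the k in [0.04, 0.06] range matches the constructor check.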
def solution( limit: int = 1_000_000 ) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
    print(F"{solution() = }")
| 350 |
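The sieve above solves Project Euler 72: counting reduced proper fractions with denominator at most 10^6 amounts to summing Euler's totient, which the multiplicative update phi[n] *= 1 - 1/p computes for every n at once:
\varphi(n) = n \prod_{p \mid n} \left(1 - \frac{1}{p}\right),
\qquad \text{answer} = \sum_{n=2}^{10^{6}} \varphi(n)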
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Tuple = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[Any] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    A : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : Optional[Any] ):
A = OrderedDict()
for key, value in state_dict.items():
if key.startswith('module.encoder' ):
A = key.replace('module.encoder' , 'glpn.encoder' )
if key.startswith('module.decoder' ):
A = key.replace('module.decoder' , 'decoder.stages' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
A = key[key.find('patch_embed' ) + len('patch_embed' )]
A = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(snake_case__ )-1}' )
if "norm" in key:
A = key.replace('norm' , 'layer_norm' )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
A = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )]
A = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(snake_case__ )-1}' )
if "layer_norm1" in key:
A = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
A = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
A = key[key.find('block' ) + len('block' )]
A = key.replace(F'block{idx}' , F'block.{int(snake_case__ )-1}' )
if "attn.q" in key:
A = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
A = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
A = key.replace('attn' , 'attention.self' )
if "fc1" in key:
A = key.replace('fc1' , 'dense1' )
if "fc2" in key:
A = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
A = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
A = key.replace('linear_fuse.conv' , 'linear_fuse' )
A = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
A = key[key.find('linear_c' ) + len('linear_c' )]
A = key.replace(F'linear_c{idx}' , F'linear_c.{int(snake_case__ )-1}' )
if "bot_conv" in key:
A = key.replace('bot_conv' , '0.convolution' )
if "skip_conv1" in key:
A = key.replace('skip_conv1' , '1.convolution' )
if "skip_conv2" in key:
A = key.replace('skip_conv2' , '2.convolution' )
if "fusion1" in key:
A = key.replace('fusion1' , '1.fusion' )
if "fusion2" in key:
A = key.replace('fusion2' , '2.fusion' )
if "fusion3" in key:
A = key.replace('fusion3' , '3.fusion' )
if "fusion" in key and "conv" in key:
A = key.replace('conv' , 'convolutional_layer' )
if key.startswith('module.last_layer_depth' ):
A = key.replace('module.last_layer_depth' , 'head.head' )
A = value
return new_state_dict
def _snake_case ( snake_case__ : List[Any] , snake_case__ : str ):
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
A = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
A = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
A = kv_weight[
: config.hidden_sizes[i], :
]
A = kv_bias[: config.hidden_sizes[i]]
A = kv_weight[
config.hidden_sizes[i] :, :
]
A = kv_bias[config.hidden_sizes[i] :]
def _snake_case ( ):
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return image
@torch.no_grad()
def _snake_case ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : str=False , snake_case__ : Union[str, Any]=None ):
A = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
A = GLPNImageProcessor()
# prepare image
A = prepare_img()
A = image_processor(images=snake_case__ , return_tensors='pt' ).pixel_values
logger.info('Converting model...' )
# load original state dict
A = torch.load(snake_case__ , map_location=torch.device('cpu' ) )
# rename keys
A = rename_keys(snake_case__ )
# key and value matrices need special treatment
read_in_k_v(snake_case__ , snake_case__ )
# create HuggingFace model and load state dict
A = GLPNForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# forward pass
A = model(snake_case__ )
A = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
A = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
A = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
A = torch.Size([1, 480, 640] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , snake_case__ , atol=1e-4 )
print('Looks ok!' )
# finally, push to hub if required
if push_to_hub:
logger.info('Pushing model and image processor to the hub...' )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=snake_case__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=snake_case__ , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''',
default=None,
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
parser.add_argument(
'''--model_name''',
default='''glpn-kitti''',
type=str,
help='''Name of the model in case you\'re pushing to the hub.''',
)
_lowercase = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 74 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=0.999 , snake_case__ : Union[str, Any]="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case__ : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case__ : Dict ):
return math.exp(t * -12.0 )
else:
        raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
A = []
for i in range(snake_case__ ):
A = i / num_diffusion_timesteps
A = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) )
return torch.tensor(snake_case__ , dtype=torch.floataa )
class lowerCAmelCase_ ( _lowercase , _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = [e.name for e in KarrasDiffusionSchedulers]
_lowerCamelCase: Optional[Any] = 2
@register_to_config
def __init__( self : str ,A_ : int = 1000 ,A_ : float = 0.0_00_85 ,A_ : float = 0.0_12 ,A_ : str = "linear" ,A_ : Optional[Union[np.ndarray, List[float]]] = None ,A_ : str = "epsilon" ,A_ : Optional[bool] = False ,A_ : Optional[bool] = False ,A_ : float = 1.0 ,A_ : str = "linspace" ,A_ : int = 0 ,) -> List[str]:
if trained_betas is not None:
A = torch.tensor(A_ ,dtype=torch.floataa )
elif beta_schedule == "linear":
A = torch.linspace(A_ ,A_ ,A_ ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,A_ ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A = betas_for_alpha_bar(A_ ,alpha_transform_type='cosine' )
elif beta_schedule == "exp":
A = betas_for_alpha_bar(A_ ,alpha_transform_type='exp' )
else:
            raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
A = 1.0 - self.betas
A = torch.cumprod(self.alphas ,dim=0 )
# set all values
self.set_timesteps(A_ ,A_ ,A_ )
A = use_karras_sigmas
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Tuple=None ) -> Tuple:
if schedule_timesteps is None:
A = self.timesteps
A = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A = 1 if len(A_ ) > 1 else 0
else:
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
A = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : torch.FloatTensor ,A_ : Union[float, torch.FloatTensor] ,) -> torch.FloatTensor:
A = self.index_for_timestep(A_ )
A = self.sigmas[step_index]
A = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : Union[str, torch.device] = None ,A_ : Optional[int] = None ,) -> Optional[Any]:
A = num_inference_steps
A = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
A = np.linspace(0 ,num_train_timesteps - 1 ,A_ ,dtype=A_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
A = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(0 ,A_ ) * step_ratio).round()[::-1].copy().astype(A_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
A = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A = (np.arange(A_ ,0 ,-step_ratio )).round().copy().astype(A_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
A = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
A = np.log(A_ )
A = np.interp(A_ ,np.arange(0 ,len(A_ ) ) ,A_ )
if self.config.use_karras_sigmas:
A = self._convert_to_karras(in_sigmas=A_ ,num_inference_steps=self.num_inference_steps )
A = np.array([self._sigma_to_t(A_ ,A_ ) for sigma in sigmas] )
A = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
A = torch.from_numpy(A_ ).to(device=A_ )
A = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
A = torch.from_numpy(A_ )
A = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(A_ ).startswith('mps' ):
# mps does not support float64
A = timesteps.to(A_ ,dtype=torch.floataa )
else:
A = timesteps.to(device=A_ )
# empty dt and derivative
A = None
A = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
A = defaultdict(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : List[str] ) -> Dict:
# get log sigma
A = np.log(A_ )
# get distribution
A = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
A = np.cumsum((dists >= 0) ,axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
A = low_idx + 1
A = log_sigmas[low_idx]
A = log_sigmas[high_idx]
# interpolate sigmas
A = (low - log_sigma) / (low - high)
A = np.clip(A_ ,0 ,1 )
# transform interpolation to time range
A = (1 - w) * low_idx + w * high_idx
A = t.reshape(sigma.shape )
return t
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : torch.FloatTensor ,A_ : int ) -> torch.FloatTensor:
A = in_sigmas[-1].item()
A = in_sigmas[0].item()
A = 7.0 # 7.0 is the value used in the paper
A = np.linspace(0 ,1 ,A_ )
A = sigma_min ** (1 / rho)
A = sigma_max ** (1 / rho)
A = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
return self.dt is None
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : Union[float, torch.FloatTensor] ,A_ : Union[torch.FloatTensor, np.ndarray] ,A_ : bool = True ,) -> Union[SchedulerOutput, Tuple]:
A = self.index_for_timestep(A_ )
# advance index counter by 1
A = timestep.cpu().item() if torch.is_tensor(A_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
A = self.sigmas[step_index]
A = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
A = self.sigmas[step_index - 1]
A = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
A = 0
A = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
A = sigma_hat if self.state_in_first_order else sigma_next
A = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
A = sigma_hat if self.state_in_first_order else sigma_next
A = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
A = model_output
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
A = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
A = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
A = sigma_next - sigma_hat
# store for 2nd order step
A = derivative
A = dt
A = sample
else:
# 2. 2nd order / Heun's method
A = (sample - pred_original_sample) / sigma_next
A = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
A = self.dt
A = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
A = None
A = None
A = None
A = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,A_ : torch.FloatTensor ,) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
A = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(A_ ):
# mps does not support float64
A = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
A = timesteps.to(original_samples.device ,dtype=torch.floataa )
else:
A = self.timesteps.to(original_samples.device )
A = timesteps.to(original_samples.device )
A = [self.index_for_timestep(A_ ,A_ ) for t in timesteps]
A = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
A = sigma.unsqueeze(-1 )
A = original_samples + noise * sigma
return noisy_samples
def __len__( self : Dict ) -> int:
return self.config.num_train_timesteps | 74 | 1 |
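A hedged sketch of how a Heun-style scheduler like the one above is driven; the zero tensor stands in for a real UNet call, which is an assumption made here for brevity:
import torch
from diffusers import HeunDiscreteScheduler

scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:  # timesteps repeat so each sigma pair runs both Heun stages
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # placeholder for unet(model_input, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample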
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    '''Returns the week-day name for a given Gregorian date via the Doomsday algorithm.'''
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 and 12"
    assert 1 <= day <= 31, "day should be between 1 and 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
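A worked check of the routine above for 2023-01-01, which is known to have been a Sunday:
assert get_week_day(2023, 1, 1) == "Sunday"
# By hand: century = 20, so century_anchor = (5 * (20 % 4) + 2) % 7 = 2
# dooms_day = ((23 // 12) + 11 + (11 // 4) + 2) % 7 = 16 % 7 = 2, i.e. Tuesday
# January's non-leap anchor date is the 3rd, so (2 + 1 - 3) % 7 = 0 -> "Sunday"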
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> List[Any]:
'''simple docstring'''
_snake_case = OmegaConf.load(UpperCamelCase__ )
_snake_case = torch.load(UpperCamelCase__ , map_location='cpu' )['model']
_snake_case = list(state_dict.keys() )
# extract state_dict for VQVAE
_snake_case = {}
_snake_case = 'first_stage_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
# extract state_dict for UNetLDM
_snake_case = {}
_snake_case = 'model.diffusion_model.'
for key in keys:
if key.startswith(UpperCamelCase__ ):
_snake_case = state_dict[key]
_snake_case = config.model.params.first_stage_config.params
_snake_case = config.model.params.unet_config.params
_snake_case = VQModel(**UpperCamelCase__ ).eval()
vqvae.load_state_dict(UpperCamelCase__ )
_snake_case = UNetLDMModel(**UpperCamelCase__ ).eval()
unet.load_state_dict(UpperCamelCase__ )
    _snake_case = DDIMScheduler(
        num_train_timesteps=config.model.params.timesteps , beta_schedule='scaled_linear' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
_snake_case = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipeline.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", type=str, required=True)
parser.add_argument("""--config_path""", type=str, required=True)
parser.add_argument("""--output_path""", type=str, required=True)
UpperCAmelCase_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 295 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase : Tuple ='''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def SCREAMING_SNAKE_CASE ( ) -> int:
UpperCamelCase__ : Dict = _ask_options(
"In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCamelCase__ : Optional[int] = get_sagemaker_input()
else:
UpperCamelCase__ : Dict = get_cluster_input()
return config
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase=None ) -> List[str]:
if subparsers is not None:
UpperCamelCase__ : List[Any] = subparsers.add_parser("config" , description=__lowerCAmelCase )
else:
UpperCamelCase__ : Any = argparse.ArgumentParser("Accelerate config command" , description=__lowerCAmelCase )
parser.add_argument(
"--config_file" , default=__lowerCAmelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=__lowerCAmelCase )
return parser
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Optional[Any]:
UpperCamelCase__ : Union[str, Any] = get_user_input()
if args.config_file is not None:
UpperCamelCase__ : Tuple = args.config_file
else:
if not os.path.isdir(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
UpperCamelCase__ : Union[str, Any] = default_yaml_config_file
if config_file.endswith(".json" ):
config.to_json_file(__lowerCAmelCase )
else:
config.to_yaml_file(__lowerCAmelCase )
print(f'accelerate configuration saved at {config_file}' )
def SCREAMING_SNAKE_CASE ( ) -> int:
UpperCamelCase__ : List[str] = config_command_parser()
UpperCamelCase__ : Dict = parser.parse_args()
config_command(__lowerCAmelCase )
if __name__ == "__main__":
main() | 189 |
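For reference, the command defined above is normally reached through the accelerate entry point; both forms below follow straight from the argparse setup:
$ accelerate config                                  # interactive prompts, saved to the default cache path
$ accelerate config --config_file ./my_config.yaml   # explicit destination; a .json suffix switches the format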
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __a ( A__ ):
_lowerCAmelCase : str = '''facebook/bart-large-mnli'''
_lowerCAmelCase : Tuple = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
_lowerCAmelCase : Any = '''text_classifier'''
_lowerCAmelCase : int = AutoTokenizer
_lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification
_lowerCAmelCase : Union[str, Any] = ['''text''', ['''text''']]
_lowerCAmelCase : Dict = ['''text''']
def __lowercase ( self : int ):
'''simple docstring'''
super().setup()
UpperCamelCase__ : Dict = self.model.config
UpperCamelCase__ : Union[str, Any] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith("entail" ):
UpperCamelCase__ : List[str] = int(SCREAMING_SNAKE_CASE )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
UpperCamelCase__ : Any = labels
return self.pre_processor(
[text] * len(SCREAMING_SNAKE_CASE ) , [F'This example is {label}' for label in labels] , return_tensors="pt" , padding="max_length" , )
def __lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = outputs.logits
UpperCamelCase__ : Any = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id] | 189 | 1 |
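A usage sketch of the tool above via the agents entry point, assuming the `text-classification` task name maps to this class in the tool registry:
from transformers import load_tool

classifier = load_tool("text-classification")
label = classifier("This new movie is awesome", labels=["positive", "negative"])
print(label)  # most likely "positive"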
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def A (__lowerCamelCase :Namespace ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_lowercase = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def _lowercase ( _lowercase ):
"""simple docstring"""
_lowerCAmelCase = parser.add_parser(
"""convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
train_parser.add_argument("""--model_type""" , type=_lowercase , required=_lowercase , help="""Model's type.""" )
train_parser.add_argument(
"""--tf_checkpoint""" , type=_lowercase , required=_lowercase , help="""TensorFlow checkpoint path or folder.""" )
train_parser.add_argument(
"""--pytorch_dump_output""" , type=_lowercase , required=_lowercase , help="""Path to the PyTorch saved model output.""" )
train_parser.add_argument("""--config""" , type=_lowercase , default="""""" , help="""Configuration file path or folder.""" )
train_parser.add_argument(
"""--finetuning_task_name""" , type=_lowercase , default=_lowercase , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
train_parser.set_defaults(func=_lowercase )
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , *_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = logging.get_logger("""transformers-cli/converting""" )
self._logger.info(F'Loading model {model_type}' )
_lowerCAmelCase = model_type
_lowerCAmelCase = tf_checkpoint
_lowerCAmelCase = pytorch_dump_output
_lowerCAmelCase = config
_lowerCAmelCase = finetuning_task_name
def _lowercase ( self ):
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
_lowerCAmelCase = self._tf_checkpoint
_lowerCAmelCase = """"""
else:
_lowerCAmelCase = self._tf_checkpoint
_lowerCAmelCase = """"""
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"""--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]""" )
| 229 |
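For reference, an invocation of the CLI command registered above; the flags come straight from the argparse definitions:
$ transformers-cli convert --model_type bert \
    --tf_checkpoint ./bert_model.ckpt \
    --config ./bert_config.json \
    --pytorch_dump_output ./pytorch_model.bin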
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 229 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = s.rsplit(__lowerCamelCase, __lowerCamelCase )
return new.join(__lowerCamelCase )
def __a ( __lowerCamelCase ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = {}
UpperCAmelCase_ : List[str] = ["group_1", "group_2", "group_3", "group_4"]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
UpperCAmelCase_ : Tuple = key.replace(f"""{group_key}.""", f"""{group_key}.group.""" )
if "res_path" in key:
UpperCAmelCase_ : Optional[Any] = key.replace("res_path.", "res_path.path." )
if key.endswith(".w" ):
UpperCAmelCase_ : Any = rreplace(__lowerCamelCase, ".w", ".weight", 1 )
if key.endswith(".b" ):
UpperCAmelCase_ : Optional[Any] = rreplace(__lowerCamelCase, ".b", ".bias", 1 )
UpperCAmelCase_ : Optional[Any] = value.float()
return upgrade
@torch.no_grad()
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=None, __lowerCamelCase=True ):
from dall_e import Encoder
UpperCAmelCase_ : Any = Encoder()
if os.path.exists(__lowerCamelCase ):
UpperCAmelCase_ : str = torch.load(__lowerCamelCase )
else:
UpperCAmelCase_ : List[str] = torch.hub.load_state_dict_from_url(__lowerCamelCase )
if isinstance(__lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = ckpt.state_dict()
encoder.load_state_dict(__lowerCamelCase )
if config_path is not None:
UpperCAmelCase_ : int = FlavaImageCodebookConfig.from_pretrained(__lowerCamelCase )
else:
UpperCAmelCase_ : List[Any] = FlavaImageCodebookConfig()
UpperCAmelCase_ : str = FlavaImageCodebook(__lowerCamelCase ).eval()
UpperCAmelCase_ : Dict = encoder.state_dict()
UpperCAmelCase_ : List[str] = upgrade_state_dict(__lowerCamelCase )
hf_model.load_state_dict(__lowerCamelCase )
UpperCAmelCase_ : List[str] = hf_model.state_dict()
UpperCAmelCase_ : List[Any] = count_parameters(__lowerCamelCase )
UpperCAmelCase_ : List[Any] = count_parameters(__lowerCamelCase )
assert torch.allclose(__lowerCamelCase, __lowerCamelCase, atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(__lowerCamelCase )
else:
return hf_state_dict
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_a = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 61 |
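A worked check of the right-to-left replace helper defined at the top of this script (the key string is illustrative): rsplit isolates only the last occurrence, so ".w" becomes ".weight" solely at the end of the key.
parts = "res_path.path.w.w".rsplit(".w", 1)  # ['res_path.path.w', '']
assert ".weight".join(parts) == "res_path.path.w.weight"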
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase_ = "true"
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=82 , SCREAMING_SNAKE_CASE__ : Optional[int]=16 ) -> Optional[Any]:
'''simple docstring'''
set_seed(42 )
A__ = RegressionModel()
A__ = deepcopy(SCREAMING_SNAKE_CASE__ )
A__ = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
A__ = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ )
model.to(accelerator.device )
A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return model, ddp_model, dataloader
def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> int:
'''simple docstring'''
A__ = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
A__ = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(SCREAMING_SNAKE_CASE__ : List[Any] ):
A__ = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
return outputs
with accelerator.main_process_first():
A__ = dataset.map(
SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
A__ = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE__ : Dict ):
if use_longest:
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE__ , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=16 )
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> str:
'''simple docstring'''
A__ = Accelerator(dispatch_batches=SCREAMING_SNAKE_CASE__ , split_batches=SCREAMING_SNAKE_CASE__ )
A__ = get_dataloader(SCREAMING_SNAKE_CASE__ , not dispatch_batches )
A__ = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ )
A__ , A__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
'''simple docstring'''
A__ = []
for batch in dataloader:
A__ , A__ = batch.values()
with torch.no_grad():
A__ = model(SCREAMING_SNAKE_CASE__ )
A__ , A__ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A__ , A__ = [], []
for logit, targ in logits_and_targets:
logits.append(SCREAMING_SNAKE_CASE__ )
targs.append(SCREAMING_SNAKE_CASE__ )
A__ , A__ = torch.cat(SCREAMING_SNAKE_CASE__ ), torch.cat(SCREAMING_SNAKE_CASE__ )
return logits, targs
def _snake_case( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : int=82 , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : Tuple=16 ) -> List[Any]:
'''simple docstring'''
A__ , A__ , A__ = get_basic_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ , A__ = generate_predictions(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert (
len(SCREAMING_SNAKE_CASE__ ) == num_samples
), f'Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(SCREAMING_SNAKE_CASE__ )}'
def _snake_case( SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False ) -> str:
'''simple docstring'''
A__ = evaluate.load('glue' , 'mrpc' )
A__ , A__ = get_mrpc_setup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# First do baseline
A__ , A__ , A__ = setup['no']
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
for batch in dataloader:
batch.to(SCREAMING_SNAKE_CASE__ )
with torch.inference_mode():
A__ = model(**SCREAMING_SNAKE_CASE__ )
A__ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=batch['labels'] )
A__ = metric.compute()
# Then do distributed
A__ , A__ , A__ = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
A__ = model(**SCREAMING_SNAKE_CASE__ )
A__ = outputs.logits.argmax(dim=-1 )
A__ = batch['labels']
A__ , A__ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ )
A__ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def _snake_case( ) -> Optional[Any]:
'''simple docstring'''
A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
test_mrpc(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A__ = Accelerator(split_batches=SCREAMING_SNAKE_CASE__ , dispatch_batches=SCREAMING_SNAKE_CASE__ )
if accelerator.is_local_main_process:
print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
test_torch_metrics(SCREAMING_SNAKE_CASE__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
A__ = Accelerator()
test_torch_metrics(SCREAMING_SNAKE_CASE__ , 512 )
accelerator.state._reset_state()
def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 7 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    """Convert a PIL image (or list of images) into a normalized torch batch."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ):
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase__ : List[Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=A__ , scheduler=A__ )
def _A ( self : str , UpperCAmelCase_ : Union[str, Any] ):
if strength < 0 or strength > 1:
            raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def _A ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple ):
# get the original timestep using init_timestep
UpperCamelCase__ : List[str] = min(int(num_inference_steps * strength ) , A__ )
UpperCamelCase__ : Optional[Any] = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase__ : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _A ( self : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=None ):
if not isinstance(A__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(A__ )}''' )
UpperCamelCase__ : Optional[Any] = image.to(device=A__ , dtype=A__ )
if isinstance(A__ , A__ ) and len(A__ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(A__ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase__ : List[Any] = init_latents.shape
UpperCamelCase__ : List[str] = randn_tensor(A__ , generator=A__ , device=A__ , dtype=A__ )
# get latents
print("add noise to latents at timestep" , A__ )
UpperCamelCase__ : List[Any] = self.scheduler.add_noise(A__ , A__ , A__ )
UpperCamelCase__ : int = init_latents
return latents
    @torch.no_grad()
    def __call__( self : Union[str, Any] , image : int = None , strength : str = 0.8 , batch_size : List[Any] = 1 , generator : List[Any] = None , eta : Dict = 0.0 , num_inference_steps : List[Any] = 50 , use_clipped_model_output : Union[str, Any] = None , output_type : List[str] = "pil" , return_dict : List[str] = True , ):
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps , num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
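# --- Hedged usage sketch (editor addition; checkpoint and image path are placeholders) ---
# Minimal way the pipeline above could be driven, using the class name it has in this file:
#   pipe = SCREAMING_SNAKE_CASE.from_pretrained("google/ddpm-celebahq-256")
#   init_image = PIL.Image.open("input.png")
#   images, latent_t = pipe(init_image, strength=0.6, num_inference_steps=50, return_dict=False)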
| 359 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3""" # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 319 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys( config : int ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key( dct : str , old : List[Any] , new : Optional[int] ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict : str , config : Optional[Any] ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config( model_name : int , eos_token_id : int ):
    image_size = 364 if """coco""" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = Blip2Config(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name : str , pytorch_dump_folder_path : Union[str, Any]=None , push_to_hub : Any=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = Blip2ForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    name , type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = Blip2Processor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print("""First values of original logits:""" , original_logits[0, :3, :3] )
    print("""First values of HF logits:""" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1E-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1E-2 )
    print("""Looks ok!""" )
    print("""Generating a caption...""" )
    prompt = """"""
    input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids.to(device )
    original_outputs = original_model.generate({"""image""": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("""Original generation:""" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("""HF generation:""" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(f"""nielsr/{model_name}""" )
        hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
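# --- Hedged usage note (editor addition; the script file name is an assumption) ---
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub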
| 114 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue( line : Dict , indent : Tuple ):
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , line ) is not None
def find_code_in_diffusers( object_name : Any ):
    parts = object_name.split(""".""" )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent( code : List[Any] ):
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""
def blackify( code : List[str] ):
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def is_copy_consistent( filename : Dict , overwrite : Optional[Any]=False ):
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f"""^{indent}# End copy""" , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = """""".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = """\n""".join(theoretical_code )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja , objb , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower() , objb.lower() , theoretical_code )
                    theoretical_code = re.sub(obja.upper() , objb.upper() , theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(filename , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(lines )
    return diffs
def check_copies( overwrite : bool = False ):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , """**/*.py""" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = """\n""".join(diffs )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
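# --- Hedged usage note (editor addition) ---
# Run from the repository root, as the header comment above says:
#   python utils/check_copies.py                      # only report inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite out-of-sync copies in place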
| 114 | 1 |
'''simple docstring'''
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class snake_case__ ( PipelineTool ):
    """simple docstring"""
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode( self : Optional[int] , audio : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        return self.pre_processor(audio , return_tensors='''pt''' ).input_features
    def forward( self : Optional[Any] , inputs : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        return self.model.generate(inputs=inputs )
    def decode( self : Union[str, Any] , outputs : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
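# --- Hedged usage sketch (editor addition; the dataset id is a real dummy set, but this flow is an assumption) ---
# A PipelineTool instance is called like a function; encode/forward/decode run in turn on an audio array:
#   import datasets
#   ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
#   tool = snake_case__()
#   transcript = tool(ds[0]["audio"]["array"])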
| 360 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> str:
    '''simple docstring'''
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['''model_config'''] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''module''']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab['''[MASK2]'''] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_a = AddedToken('''<ent>''' , lstrip=False , rstrip=False )
    entity_token_b = AddedToken('''<ent2>''' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_b]} )
    config.vocab_size += 2
    print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''r''' ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config['''tokenizer_class'''] = '''MLukeTokenizer'''
    with open(os.path.join(pytorch_dump_folder_path , '''tokenizer_config.json''' ) , '''w''' ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
    word_emb = state_dict['''embeddings.word_embeddings.weight''']
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    enta_emb = word_emb[enta_init_index].unsqueeze(0 )
    state_dict['''embeddings.word_embeddings.weight'''] = torch.cat([word_emb, ent_emb, enta_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + '''w2e_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2w_''' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + '''e2e_''' + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['''entity_embeddings.entity_embeddings.weight''']
    entity_mask_emb = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_embeddings.entity_embeddings.weight'''] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict['''entity_predictions.bias''']
    entity_mask_bias = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
    state_dict['''entity_predictions.bias'''] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
    state_dict.pop('''entity_predictions.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.weight''' )
    state_dict.pop('''lm_head.decoder.bias''' )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
            state_dict_for_hugging_face[F'luke.{key}'] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    state_dict = state_dict_for_hugging_face
    missing_keys ,unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='''entity_classification''' )
    text = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768) )
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = '''Tokyo is the capital of <mask>.'''
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors='''pt''' )
    outputs = model(**encoding )
    input_ids = encoding['''input_ids'''][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path ) -> List[str]:
    '''simple docstring'''
    SPECIAL_TOKENS = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry['''id''']
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F'{language}:{entity_name}'
            new_mapping[new_entity_name] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
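# --- Hedged usage note (editor addition; the file name and all paths are placeholders) ---
#   python convert_mluke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path mluke.bin --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base --model_size base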
| 83 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class _a ( PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self : Tuple , vocab_file : int=None , merges_file : Any=None , unk_token : Tuple="<|endoftext|>" , bos_token : Any="<|endoftext|>" , eos_token : Union[str, Any]="<|endoftext|>" , add_prefix_space : Tuple=False , trim_offsets : List[Any]=True , **kwargs : Optional[Any] , )-> Union[str, Any]:
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self : Tuple , token_ids_a : Dict , token_ids_b : Any=None )-> Optional[int]:
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None )-> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
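# --- Hedged usage sketch (editor addition) ---
# The fast tokenizer above is normally loaded from the hub checkpoint it wraps:
#   tok = _a.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tok("sam is a great name. it means 'listener'.").input_ids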
| 131 |
def remove_duplicates( key ):
    """simple docstring"""
    key_no_dups = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def create_cipher_map( key ):
    """simple docstring"""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message , cipher_map ):
    """simple docstring"""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message , cipher_map ):
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main( ):
    """simple docstring"""
    message = input('''Enter message to encode or decode: ''' ).strip()
    key = input('''Enter keyword: ''' ).strip()
    option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 131 | 1 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def _snake_case ( UpperCamelCase : int ):
UpperCAmelCase : Tuple = prime_factors(UpperCamelCase )
if is_square_free(UpperCamelCase ):
return -1 if len(UpperCamelCase ) % 2 else 1
return 0
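# --- Hedged worked examples (editor addition; assumes prime_factors(1) == []) ---
# mu(1) = 1 (vacuously square-free, zero prime factors), mu(6) = 1 (2*3, even count),
# mu(30) = -1 (2*3*5, odd count), mu(12) = 0 (divisible by the square 4):
#   assert [_snake_case(n) for n in (1, 6, 30, 12)] == [1, 1, -1, 0]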
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ) -> Any:
        '''simple docstring'''
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else """en_XX"""
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> Optional[int]:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["""forced_bos_token_id"""] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts , src_lang = "en_XX" , tgt_texts = None , tgt_lang = "ro_RO" , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ) -> Union[str, Any]:
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> str:
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 76 | 0 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size( features :Features ) -> Optional[int]:
    batch_size = np.inf
    def set_batch_size(feature :FeatureType ) -> None:
        nonlocal batch_size
        if isinstance(feature , Image ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
        elif isinstance(feature , Audio ):
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
        elif isinstance(feature , Value ) and feature.dtype == "binary":
            batch_size = min(batch_size , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
    _visit(features , set_batch_size )
    return None if batch_size is np.inf else batch_size
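# --- Hedged usage note (editor addition) ---
# A dataset with an Image column gets its Parquet row-group size capped at the
# image-specific config value, while plain scalar features return None (use the default):
#   batch_size = get_writer_batch_size(Features({"img": Image(), "label": Value("int64")}))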
class ParquetDatasetReader ( AbstractDatasetReader ):
    '''simple docstring'''
    def __init__( self : str , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs : List[Any] , ) ->List[Any]:
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
        self.builder = Parquet(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , hash=hash , **kwargs , )
    def read( self : List[Any] ) ->Optional[int]:
        """simple docstring"""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class ParquetDatasetWriter :
    '''simple docstring'''
    def __init__( self : Union[str, Any] , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , **parquet_writer_kwargs : List[str] , ) ->Any:
        """simple docstring"""
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features )
        self.parquet_writer_kwargs = parquet_writer_kwargs
    def write( self : Tuple ) ->int:
        """simple docstring"""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with open(self.path_or_buf , '''wb+''' ) as buffer:
                written = self._write(file_obj=buffer , batch_size=batch_size , **self.parquet_writer_kwargs )
        else:
            written = self._write(file_obj=self.path_or_buf , batch_size=batch_size , **self.parquet_writer_kwargs )
        return written
    def _write( self : List[Any] , file_obj : BinaryIO , batch_size : int , **parquet_writer_kwargs : List[str] ) ->int:
        """simple docstring"""
        written = 0
        _ = parquet_writer_kwargs.pop('''path_or_buf''' , None )
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj , schema=schema , **parquet_writer_kwargs )
        for offset in logging.tqdm(
            range(0 , len(self.dataset ) , batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
            batch = query_table(
                table=self.dataset._data , key=slice(offset , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
            writer.write_table(batch )
            written += batch.nbytes
        writer.close()
        return written
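# --- Hedged usage sketch (editor addition) ---
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ParquetDatasetWriter(ds, "data.parquet").write()  # returns the number of bytes written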
| 0 |
"""simple docstring"""
def solution( n = 1_000 ):
    """simple docstring"""
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 153 | 0 |
"""simple docstring"""
class TrieNode:
    """simple docstring"""
    def __init__( self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False
    def insert_many( self , words):
        for word in words:
            self.insert(word)
    def insert( self , word):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True
    def find( self , word):
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf
    def delete( self , word):
        def _delete(curr , word , index) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node , word , index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr
        _delete(self , word , 0)
def print_words( node , word ):
    if node.is_leaf:
        print(word , end=""" """ )
    for key, value in node.nodes.items():
        print_words(value , word + key )
def test_trie( ):
    words = """banana bananas bandana band apple all beast""".split()
    root = TrieNode()
root.insert_many(UpperCamelCase_ )
# print_words(root, "")
assert all(root.find(UpperCamelCase_ ) for word in words )
assert root.find("""banana""" )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
assert root.find("""apple""" )
assert root.find("""all""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def print_results( msg , passes ):
    print(str(msg ) , """works!""" if passes else """doesn't work :(""" )
def pytests( ):
assert test_trie()
def main( ):
print_results("""Testing trie functionality""" , test_trie() )
if __name__ == "__main__":
main()
| 255 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self):
        super().setUp()
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        self.word_shape_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""])
        self.word_pronunciation_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))
        with open(self.word_shape_file , """w""" , encoding="""utf-8""") as word_shape_writer:
            json.dump(word_shape , word_shape_writer , ensure_ascii=False)
        with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""") as word_pronunciation_writer:
            json.dump(word_pronunciation , word_pronunciation_writer , ensure_ascii=False)
    def test_full_tokenizer( self):
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
        tokens = tokenizer.tokenize("""你好[SEP]你是谁""")
        self.assertListEqual(tokens , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens) , [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens) , [5, 6, 2, 5, 7, 8])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""") , ["""ah""", """\u535A""", """\u63A8""", """zz"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""hello""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""h\u00E9llo"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""])
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""hello"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , strip_accents=lowerCAmelCase__)
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = RoCBertBasicTokenizer(do_lower_case=lowerCAmelCase__ , never_split=["""[UNK]"""])
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""") , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = RoCBertWordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""")
self.assertListEqual(tokenizer.tokenize("""""") , [])
self.assertListEqual(tokenizer.tokenize("""unwanted running""") , ["""un""", """##want""", """##ed""", """runn""", """##ing"""])
self.assertListEqual(tokenizer.tokenize("""unwantedX running""") , ["""[UNK]""", """runn""", """##ing"""])
def snake_case_ ( self):
self.assertTrue(_is_whitespace(""" """))
self.assertTrue(_is_whitespace("""\t"""))
self.assertTrue(_is_whitespace("""\r"""))
self.assertTrue(_is_whitespace("""\n"""))
self.assertTrue(_is_whitespace("""\u00A0"""))
self.assertFalse(_is_whitespace("""A"""))
self.assertFalse(_is_whitespace("""-"""))
def snake_case_ ( self):
self.assertTrue(_is_control("""\u0005"""))
self.assertFalse(_is_control("""A"""))
self.assertFalse(_is_control(""" """))
self.assertFalse(_is_control("""\t"""))
self.assertFalse(_is_control("""\r"""))
def snake_case_ ( self):
self.assertTrue(_is_punctuation("""-"""))
self.assertTrue(_is_punctuation("""$"""))
self.assertTrue(_is_punctuation("""`"""))
self.assertTrue(_is_punctuation("""."""))
self.assertFalse(_is_punctuation("""A"""))
self.assertFalse(_is_punctuation(""" """))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase__) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
if self.test_rust_tokenizer:
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase__) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]])
def snake_case_ ( self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(
lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase__ , """do_lower_case""") else False
__SCREAMING_SNAKE_CASE = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """Allen"""),
((2_1, 2_3), """##NL"""),
((2_3, 2_4), """##P"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), """allen"""),
((2_1, 2_3), """##nl"""),
((2_3, 2_4), """##p"""),
((2_5, 3_3), """sentence"""),
((3_3, 3_4), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""]))
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""])
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ["""的""", """人""", """有"""]
__SCREAMING_SNAKE_CASE = """""".join(lowerCAmelCase__)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase__)
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase__)
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""你好""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""你是谁""" , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = self.get_tokenizers(do_lower_case=lowerCAmelCase__)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
__SCREAMING_SNAKE_CASE = """你好,你是谁"""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_shape_ids(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_pronunciation_ids(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.prepare_for_model(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
| 255 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
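    # Illustrative sketch (added; the ids below are assumed): unlike BERT, XLNet
    # appends its special tokens at the END of the sequence, so a pair (A, B) is
    # laid out as  A + <sep> + B + <sep> + <cls>.
    # E.g. with sep_token_id=4 and cls_token_id=3:
    #   build_inputs_with_special_tokens([10, 11], [20]) -> [10, 11, 4, 20, 4, 3]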
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 296 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
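# Standalone sketch (added; not part of the original script) of the qkv split
# above: timm fuses query/key/value into a single (3*dim, dim) projection, and
# the HF checkpoint expects three equal row-blocks in q, k, v order.
def _demo_qkv_split() -> None:
    import torch

    dim = 4
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)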
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
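# Example invocation (added; the script filename and output path are assumed):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224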
| 296 | 1 |
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[fsspec.AbstractFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
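# Minimal usage sketch (added; the bucket/paths below are made up):
if __name__ == "__main__":
    assert extract_path_from_uri("s3://my-bucket/datasets/train") == "my-bucket/datasets/train"
    assert extract_path_from_uri("relative/path") == "relative/path"
    assert not is_remote_filesystem(fsspec.filesystem("file"))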
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 | 0 |
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
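# Worked example (added): the defining property of a modular inverse is
# (a * find_mod_inverse(a, m)) % m == 1.
if __name__ == "__main__":
    assert find_mod_inverse(7, 26) == 15  # 7 * 15 == 105 == 4 * 26 + 1
    assert (7 * find_mod_inverse(7, 26)) % 26 == 1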
| 140 |
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
| 140 | 1 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
lowercase__ : List[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
lowercase__ : Any = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def __lowercase ( _a , _a , _a , _a , _a ):
for attribute in key.split('''.''' ):
snake_case_ : Optional[Any] = getattr(_a , _a )
if weight_type is not None:
snake_case_ : Optional[Any] = getattr(_a , _a ).shape
else:
snake_case_ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
snake_case_ : str = value
elif weight_type == "weight_g":
snake_case_ : Any = value
elif weight_type == "weight_v":
snake_case_ : Optional[int] = value
elif weight_type == "bias":
snake_case_ : List[Any] = value
else:
snake_case_ : Optional[int] = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowercase ( _a , _a ):
snake_case_ : str = []
snake_case_ : Optional[Any] = fairseq_model.state_dict()
snake_case_ : List[str] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case_ : int = False
if "conv_layers" in name:
load_conv_layer(
_a , _a , _a , _a , hf_model.config.feat_extract_norm == '''group''' , )
snake_case_ : Dict = True
else:
for key, mapped_key in MAPPING.items():
snake_case_ : Optional[int] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case_ : Tuple = True
if "*" in mapped_key:
snake_case_ : Dict = name.split(_a )[0].split('''.''' )[-2]
snake_case_ : int = mapped_key.replace('''*''' , _a )
if "weight_g" in name:
snake_case_ : str = '''weight_g'''
elif "weight_v" in name:
snake_case_ : Dict = '''weight_v'''
elif "bias" in name:
snake_case_ : Tuple = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case_ : Optional[Any] = '''weight'''
else:
snake_case_ : Tuple = None
set_recursively(_a , _a , _a , _a , _a )
continue
if not is_used:
unused_weights.append(_a )
logger.warning(f"Unused weights: {unused_weights}" )
def __lowercase ( _a , _a , _a , _a , _a ):
snake_case_ : Optional[Any] = full_name.split('''conv_layers.''' )[-1]
snake_case_ : Optional[Any] = name.split('''.''' )
snake_case_ : str = int(items[0] )
snake_case_ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
snake_case_ : Optional[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
snake_case_ : List[str] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found." )
snake_case_ : Optional[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." )
snake_case_ : str = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_a )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowercase__ : List[str] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 155 |
"""simple docstring"""
def __lowercase ( _a , _a ):
return base * power(_a , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('''Raise base to the power of exponent using recursion...''')
lowercase__ : Optional[Any] = int(input('''Enter the base: ''').strip())
lowercase__ : int = int(input('''Enter the exponent: ''').strip())
lowercase__ : int = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
lowercase__ : Any = 1 / result
print(f'{base} to the power of {exponent} is {result}')
| 155 | 1 |
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 14 |
from __future__ import annotations


def fractional_knapsack(value: list[float], weight: list[float], capacity: float) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
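# Worked example (added; values/weights are made up): the ratios are 6, 5 and 4,
# so the first two items are taken whole and 20 of the last item's 30 weight
# units fill the remaining capacity.
if __name__ == "__main__":
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 2 / 3]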
| 14 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , a : Dict , a : Optional[int]=13 , a : List[str]=7 , a : int=True , a : List[Any]=True , a : List[Any]=True , a : Tuple=True , a : Union[str, Any]=99 , a : str=16 , a : Union[str, Any]=36 , a : Optional[Any]=6 , a : Dict=6 , a : Optional[int]=6 , a : int=37 , a : str="gelu" , a : str=0.1 , a : List[str]=0.1 , a : Optional[int]=512 , a : Optional[Any]=16 , a : Union[str, Any]=2 , a : Union[str, Any]=0.02 , a : Dict=3 , a : Optional[Any]=4 , a : List[str]=None , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = embedding_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_hidden_groups
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[int]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : List[Any] , a : Optional[Any] , a : Optional[int] , a : Tuple , a : int , a : Union[str, Any] )-> Any:
"""simple docstring"""
lowercase__ = AlbertModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case )
lowercase__ = model(_snake_case , token_type_ids=_snake_case )
lowercase__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : str , a : Tuple , a : List[Any] , a : Union[str, Any] , a : List[str] )-> str:
"""simple docstring"""
lowercase__ = AlbertForPreTraining(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , sentence_order_label=_snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : Union[str, Any] , a : Union[str, Any] , a : Optional[int] , a : str , a : Tuple , a : Any , a : Tuple )-> Dict:
"""simple docstring"""
lowercase__ = AlbertForMaskedLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Union[str, Any] , a : List[Any] , a : Tuple , a : str , a : Optional[int] , a : Tuple , a : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = AlbertForQuestionAnswering(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Any , a : Optional[Any] , a : List[str] , a : Optional[Any] , a : int , a : Union[str, Any] , a : Optional[int] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = AlbertForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[int] , a : Optional[int] , a : Dict , a : Union[str, Any] , a : Tuple , a : Union[str, Any] , a : Optional[int] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.num_labels
lowercase__ = AlbertForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[Any] , a : Tuple , a : Any , a : int , a : List[str] , a : List[Any] , a : Tuple )-> Any:
"""simple docstring"""
lowercase__ = self.num_choices
lowercase__ = AlbertForMultipleChoice(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Any = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Any , a : List[str] , a : int=False )-> Optional[Any]:
"""simple docstring"""
lowercase__ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case )
if return_labels:
if model_class in get_values(_snake_case ):
lowercase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
"""simple docstring"""
lowercase__ = AlbertModelTester(self )
lowercase__ = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self : int )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ = type
self.model_tester.create_and_check_model(*_snake_case )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = AlbertModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = AlbertModel.from_pretrained('albert-base-v2' )
lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowercase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__ = model(_snake_case , attention_mask=_snake_case )[0]
lowercase__ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _snake_case )
lowercase__ = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _snake_case , atol=1E-4 ) )
| 361 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}


class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1_024,
        d_embed=1_024,
        n_head=16,
        d_head=64,
        d_inner=4_096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1_600,
        clamp_len=1_000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 269 | 0 |
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
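    # Sanity checks (a sketch): the even Fibonacci numbers up to 100 are
    # 2, 8 and 34, and the classic Project Euler #2 answer for the default
    # limit of four million is 4,613,732.
    assert solution(100) == 44
    assert solution() == 4_613_732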
| 221 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
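
# A quick illustration of how `make_batched` normalizes its input (a sketch,
# using numpy arrays as stand-in frames):
#
#   frame = np.zeros((16, 16, 3), dtype=np.uint8)
#   make_batched(frame)            # -> [[frame]]         a single frame becomes one video
#   make_batched([frame, frame])   # -> [[frame, frame]]  a list of frames becomes one video
#   make_batched([[frame]])        # -> [[frame]]         already-batched input is returned as-is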
class UpperCamelCase__(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # The original arithmetic is preserved: optionally shift, then rescale.
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
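
if __name__ == "__main__":
    # Usage sketch (not part of the original module; the class name is kept from
    # the source dump): preprocess a single 8-frame video of 224x224 RGB frames.
    processor = UpperCamelCase__()
    video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected: (1, 8, 3, 224, 224)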
| 221 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif stress < 0:
raise ValueError("""Stress cannot be negative""" )
elif tangential_force < 0:
raise ValueError("""Tangential Force cannot be negative""" )
elif area < 0:
raise ValueError("""Area cannot be negative""" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
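
    # Worked examples (a sketch): a 100 N tangential force over 20 m^2 gives a
    # shear stress of 100 / 20 = 5 Pa; knowing stress and force instead recovers
    # the area as 100 / 25 = 4 m^2.
    assert shear_stress(stress=0, tangential_force=100, area=20) == ("stress", 5.0)
    assert shear_stress(stress=25, tangential_force=100, area=0) == ("area", 4.0)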
| 170 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix: str = "") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float32) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)

        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))

        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 340 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, normalising the vertex order."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving achievable by removing redundant edges (Project Euler 107)."""
    script_directory: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 1 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        sequence = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'input_ids': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 367 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (the k bias of the fused qkv projection stays zero)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id=None):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
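
# For instance (a sketch): get_blip2_config("blip2-opt-2.7b-coco") yields a config
# whose vision tower expects 364x364 pixels, while non-COCO checkpoints use 224x224.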
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 110 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n author = "Lin, Chin-Yew",\n booktitle = "Text Summarization Branches Out",\n month = jul,\n year = "2004",\n address = "Barcelona, Spain",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W04-1013",\n pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 290 |
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
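
# For example (a sketch): a `NestedDataStructureLike[int]` may be a bare value,
# a list, or a string-keyed dict — 1, [1, 2] and {"a": 1} all match — while
# `PathLike` covers str, bytes and os.PathLike objects such as pathlib.Path.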
| 290 | 1 |
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    """Check whether a series is arithmetic, i.e. has a constant common difference."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
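
    # Hand-checked examples (a sketch): [2, 4, 6] has a constant difference of 2,
    # so it is arithmetic, and its mean is (2 + 4 + 6) / 3 = 4.0.
    assert is_arithmetic_series([2, 4, 6])
    assert not is_arithmetic_series([2, 4, 7])
    assert arithmetic_mean([2, 4, 6]) == 4.0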
| 175 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256_008,
        max_position_embeddings=2_048,
        d_model=1_024,
        ffn_dim=4_096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
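
if __name__ == "__main__":
    # A minimal usage sketch (not part of the original module): the attribute_map
    # above aliases the generic names onto XGLM's own field names.
    config = XGLMConfig(num_layers=12, d_model=512)
    assert config.hidden_size == 512
    assert config.num_hidden_layers == 12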
| 175 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
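
# For example (a sketch): is_chinese("身高") == 1, while is_chinese("180") == 0,
# since every character of the word must fall inside the CJK block.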
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer, bert_tokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
required=False,
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp",
required=False,
type=str,
default="./resources/ltp",
help="resources for LTP tokenizer, usually a path",
)
parser.add_argument(
"--bert",
required=False,
type=str,
default="./resources/robert",
help="resources for Bert tokenizer",
)
parser.add_argument(
"--save_path",
required=False,
type=str,
default="./resources/ref.txt",
help="path to save res",
)
    args = parser.parse_args()
main(args)
| 164 |
"""simple docstring"""
import requests
UpperCAmelCase__ = """""" # <-- Put your OpenWeatherMap appid here!
UpperCAmelCase__ = """https://api.openweathermap.org/data/2.5/"""
def __UpperCAmelCase ( lowercase = "Chicago" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """weather""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = "Kolkata, India" ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """forecast""" ,params=locals() ).json()
def __UpperCAmelCase ( lowercase = 55.68 ,lowercase = 12.57 ,lowercase = APPID ):
"""simple docstring"""
return requests.get(URL_BASE + """onecall""" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location: ").strip()
if location:
pprint(current_weather(location))
else:
break
| 289 | 0 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1_000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 350 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution"""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
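
# For instance (a sketch): a uniform distribution over four outcomes has entropy
# -4 * (0.25 * ln 0.25) = ln 4 ~= 1.386, which is what
# entropy(torch.full((4,), 0.25)) returns.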
def print_2d_tensor(tensor):
    """Print a 2D tensor"""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute: head attention entropy and head importance scores"""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """This method shows how to mask heads (set some heads to zero) to test the effect on the network,
    based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """This method shows how to prune heads (remove heads weights) based on the head importance scores
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)
    """
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )

    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )

    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")

    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")

    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
_A = torch.device('cuda' , args.local_rank )
_A = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_A = nn.parallel.DistributedDataParallel(
_snake_case , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_snake_case )
elif args.n_gpu > 1:
_A = nn.DataParallel(_snake_case )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_snake_case )
torch.save(_snake_case , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , _snake_case )
# Prepare dataset
_A = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_A = (torch.from_numpy(_snake_case ),)
_A = TensorDataset(*_snake_case )
_A = RandomSampler(_snake_case )
_A = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_snake_case , _snake_case , _snake_case )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_A = mask_heads(_snake_case , _snake_case , _snake_case )
prune_heads(_snake_case , _snake_case , _snake_case , _snake_case )
if __name__ == "__main__":
    main()
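# Illustrative invocation (not part of the original script; the script name,
# data path and output directory below are placeholders -- only the flags are
# taken from the argument parser defined above):
#
#   python run_gpt2_head_pruning.py \
#       --data_dir ./data/encoded_tokens.txt \
#       --model_name_or_path gpt2 \
#       --output_dir ./pruning_output \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1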
| 271 | 0 |
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
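# Illustrative (hypothetical) contents of utils/documentation_tests.txt --
# one repository-relative file or directory per line, in alphabetical order:
#
#   docs/source/en/quicktour.md
#   src/transformers/models/bert/modeling_bert.py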
| 5 |
from math import ceil


def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral grid
    (n odd), built by starting with 1 and moving to the right in a clockwise
    direction (Project Euler problem 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
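# Worked example: for a 5x5 spiral the diagonal numbers are
# 1, 3, 5, 7, 9, 13, 17, 21 and 25, which sum to 101; accordingly
# solution(5) == 1 + (4 * 3**2 - 6 * 2) + (4 * 5**2 - 6 * 4) == 101.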
| 274 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
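# Illustrative invocation (the test-file path is an assumption based on the
# usual transformers test layout; RUN_SLOW=1 enables the @slow tests):
#
#   RUN_SLOW=1 python -m pytest tests/models/nystromformer/test_modeling_nystromformer.py -k "masked_lm"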
| 355 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model("roberta")
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, "fake-roberta")
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, "config.json"), "w") as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoConfig.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            _ = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)

        config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test the config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def test_new_dynamic_config_registration(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model", trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    flip_channel_order,
    get_resize_output_image_size,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging


if is_vision_available():
    import PIL

if is_torch_available():
    import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    r"""Constructs an image processor that resizes, center-crops, rescales and
    (optionally) flips images from RGB to BGR, as expected by the pretrained
    MobileViT-style checkpoints."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
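# A minimal usage sketch (not part of the original module; assumes the class
# name restored above and the default 256x256 center crop):
#
#   from PIL import Image
#   processor = MobileViTImageProcessor()
#   image = Image.new("RGB", (640, 480))
#   batch = processor(images=image, return_tensors="pt")
#   print(batch["pixel_values"].shape)  # torch.Size([1, 3, 256, 256])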
| 155 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 155 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
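# For reference, a minimal sketch of the `entropy` helper imported above
# (assumption: it matches the DeeBERT implementation): the Shannon entropy of
# the softmax distribution over the logits, used as the early-exit confidence.
#
#   import torch
#
#   def entropy(x):
#       exp_x = torch.exp(x)
#       A = torch.sum(exp_x, dim=-1)      # softmax normalizer
#       B = torch.sum(x * exp_x, dim=-1)  # expectation term
#       return torch.log(A) - B / A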
| 369 |
"""A small collection of helper functions around prime numbers: primality
tests, sieving, prime factorization, Goldbach pairs, gcd/lcm and friends."""

from math import sqrt


def is_prime(number):
    """Returns True if 'number' is a prime number, otherwise False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n):
    """Returns all primes from 2 up to n, using the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n):
    """Returns all primes from 2 up to n, by testing each candidate with is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number):
    """Returns the prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number):
    """Returns the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number):
    """Returns the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)

    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number):
    """Returns True if 'number' is even, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number):
    """Returns True if 'number' is odd, otherwise False."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number):
    """Goldbach's assumption: returns two prime numbers whose sum equals the
    (even) input number."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1, number2):
    """Greatest common divisor of two positive integers (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1, number2):
    """Least common multiple of two positive integers, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n):
    """Returns the n-th prime number, with get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1, p_number_2):
    """Returns a list of all prime numbers strictly between the two input primes."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Returns all divisors of n (inclusive 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number):
    """Returns True if 'number' is a perfect number, otherwise False."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Returns the fraction (numerator, denominator) reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Returns n! (the factorial of n)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n):
    """Returns the n-th Fibonacci number, computed iteratively."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
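# A few illustrative values (computed by hand, doubling as a smoke test):
#
#   is_prime(97)            -> True
#   prime_factorization(40) -> [2, 2, 2, 5]
#   goldbach(28)            -> [5, 23]
#   kg_v(8, 10)             -> 40
#   fib(5)                  -> 8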
| 52 | 0 |
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Computes the minimum cost of travel passes (1-day, 7-day and 30-day,
    priced by `costs`) that cover every travel day in `days` (day numbers
    within one year)."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
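# Worked example: with days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15],
# the optimum is a 1-day pass on day 1 (2), a 7-day pass covering days 4-10 (7)
# and a 1-day pass on day 20 (2), so mincost_tickets(...) returns 11.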
| 95 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Applies the logistic sigmoid function 1 / (1 + exp(-x)) elementwise."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
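# Example: sigmoid(np.array([-1.0, 0.0, 1.0])) is approximately
# array([0.26894142, 0.5, 0.73105858]); note that sigmoid(0) == 0.5 exactly.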
| 95 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
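# Illustrative (hypothetical) use of the fixtures above in a test; the exact
# load_dataset token argument may differ between datasets versions:
#
#   import datasets
#
#   def test_load_private_hub_dataset(hf_private_dataset_repo_txt_data, hf_token):
#       ds = datasets.load_dataset(hf_private_dataset_repo_txt_data, token=hf_token)
#       assert "train" in ds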
| 39 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first n natural numbers (Project Euler problem 6). The square
    of the sum equals the sum of cubes, hence the variable name."""
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares


if __name__ == "__main__":
    print(f"{solution() = }")
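# Worked example for n = 10: the square of the sum is 55**2 == 3025, the sum
# of the squares is 385, and solution(10) == 3025 - 385 == 2640.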
| 39 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )
@require_tf
class A_ :
UpperCAmelCase__ = XGLMConfig
UpperCAmelCase__ = {}
UpperCAmelCase__ = '''gelu'''
def __init__( self , _A , _A=1_4 , _A=7 , _A=True , _A=True , _A=True , _A=9_9 , _A=3_2 , _A=2 , _A=4 , _A=3_7 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=0.02 , ):
'''simple docstring'''
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = ffn_dim
UpperCAmelCase = activation_function
UpperCAmelCase = activation_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = initializer_range
UpperCAmelCase = None
UpperCAmelCase = 0
UpperCAmelCase = 2
UpperCAmelCase = 1
def _lowercase ( self ):
'''simple docstring'''
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = self.get_config()
UpperCAmelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _lowercase ( self ):
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_A , )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLMHeadTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 273 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ], mid_block_type="UNetMidBlock2DSimpleCrossAttn", up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type="text", addition_embed_type_num_heads=2, cross_attention_norm="group_norm", resnet_time_scale_shift="scale_shift", act_fn="gelu", class_embed_type="timestep", mid_block_scale_factor=1.414, time_embedding_act_fn="gelu", time_embedding_dim=32, )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, thresholding=True, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type="epsilon", variance_type="learned_range", )
        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2", beta_start=0.0001, beta_end=0.02, )
        torch.manual_seed(0)
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None, f"""`{optional_component}` did not stay set to None after loading.""", )
        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]
        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 273 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 58 |
"""simple docstring"""
__snake_case : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__snake_case : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__snake_case : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__snake_case : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__snake_case : Dict = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__snake_case : Any = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__snake_case : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__snake_case : str = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 58 | 1
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[], metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        }, )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"})
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, )
    inference: bool = field(
        default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, )
    cuda: bool = field(
        default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."})
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, )
    memory: bool = field(
        default=True, metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        }, )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True, metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        }, )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time() )}.csv", metadata={"help": "CSV filename used if saving time results to csv."}, )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time() )}.csv", metadata={"help": "CSV filename used if saving memory results to csv."}, )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time() )}.csv", metadata={"help": "CSV filename used if saving time results to csv for training."}, )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time() )}.csv", metadata={"help": "CSV filename used if saving memory results to csv for training."}, )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time() )}.csv", metadata={"help": "CSV filename used if saving environment information."}, )
    log_filename: str = field(
        default=f"log_{round(time() )}.csv", metadata={"help": "Log filename used if print statements are saved in log."}, )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False, metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        }, )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.", FutureWarning, )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased'].")
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
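# Minimal usage sketch (model name and values are illustrative, not from the
# original file):
#
#     args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
#     print(args.model_names)       # ['bert-base-uncased']
#     print(args.to_json_string())  # full configuration serialized as JSON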
| 91 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None)
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        sd_pipe = StableDiffusionPipeline(
            unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=0, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, sld_guidance_scale=2000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])

        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 252 | 0 |
import unittest

from transformers import DonutProcessor


CHECKPOINT = 'naver-clova-ix/donut-base'


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(CHECKPOINT)

    def test_token2json(self):
        expected_json = {
            """name""": """John Doe""",
            """age""": """99""",
            """city""": """Atlanta""",
            """state""": """GA""",
            """zip""": """30301""",
            """phone""": """123-4567""",
            """nicknames""": [{"""nickname""": """Johnny"""}, {"""nickname""": """JD"""}],
        }
        sequence = (
            """<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"""
            """<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"""
            """<s_nicknames><s_nickname>Johnny</s_nickname>"""
            """<sep/><s_nickname>JD</s_nickname></s_nicknames>"""
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 243 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Reduce the amount of console output from TF
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 243 | 1 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ''
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input('Enter message to encode or decode: ').strip()
    key = input('Enter keyword: ').strip()
    option = input('Encipher or decipher? E/D:').strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option')
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 285 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # leaf modules only: no submodules, or a conv / batch-norm layer
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced)} operations while"""
                f""" destination module has {len(dest_traced)}.""")

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"""resnet{"-".join(name.split("resnet"))}"""
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True, )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True, )

        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
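# Hypothetical invocations (the script and output names are illustrative):
#
#     convert_weights_and_push(Path("converted"), model_name="resnet50", push_to_hub=False)
#     # or from the command line:
#     #   python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path converted/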
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 285 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode raw audio bytes into a mono float32 waveform by piping them through ffmpeg."""
    ar = f"""{sampling_rate}"""
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to load audio files from filename""") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("""Malformed soundfile""")
    return audio
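# Hedged usage sketch (the file name is illustrative): decode any
# ffmpeg-readable file into a mono float32 waveform at 16 kHz.
#
#     with open("sample.mp3", "rb") as f:
#         waveform = ffmpeg_read(f.read(), sampling_rate=16000)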
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "f32le" , ):
_snake_case = f"""{sampling_rate}"""
_snake_case = '1'
if format_for_conversion == "s16le":
_snake_case = 2
elif format_for_conversion == "f32le":
_snake_case = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_snake_case = platform.system()
if system == "Linux":
_snake_case = 'alsa'
_snake_case = 'default'
elif system == "Darwin":
_snake_case = 'avfoundation'
_snake_case = ':0'
elif system == "Windows":
_snake_case = 'dshow'
_snake_case = 'default'
_snake_case = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_snake_case = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_snake_case = _ffmpeg_stream(__a , __a )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le", ):
    """Like `ffmpeg_microphone`, but re-chunks the stream with strides so overlapping
    audio can be fed to a model while it is still being recorded."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item["""raw"""], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Reads raw bytes from `iterator` and yields them as `chunk_len`-byte chunks with
    an overlap given by `stride`; with `stream=True`, partial chunks are also emitted."""
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
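# A small hand-verified example (not in the original module): chunking 8 bytes
# into 4-byte windows with a (1, 1) stride yields overlapping chunks plus a
# final remainder.
#
#     >>> [c["raw"] for c in chunk_bytes_iter(iter([b"abcdefgh"]), 4, stride=(1, 1))]
#     [b'abcd', b'cdef', b'efgh', b'gh']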
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal generator reading `buflen`-byte blocks from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""") from error
| 350 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("""\n""".join(str(row) for row in solutions))
    else:
        print("""No solution exists!""")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 270 | 0 |
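Since the masked function names above collide, here is a self-contained restatement of the same depth-first backtracking on a tiny 2x2 maze (0 = free cell, 1 = wall); all names are illustrative.

def solve(maze, i, j, path):
    n = len(maze)
    if i == j == n - 1:  # reached the bottom-right goal cell
        path[i][j] = 1
        return True
    if 0 <= i < n and 0 <= j < n and not path[i][j] and not maze[i][j]:
        path[i][j] = 1  # mark visited
        for di, dj in ((1, 0), (0, 1), (-1, 0), (0, -1)):
            if solve(maze, i + di, j + dj, path):
                return True
        path[i][j] = 0  # backtrack
    return False

maze = [[0, 1], [0, 0]]
path = [[0] * 2 for _ in range(2)]
assert solve(maze, 0, 0, path) and path == [[1, 0], [1, 1]]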
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : Any = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class _snake_case ( lowerCAmelCase_ ):
UpperCamelCase__ = "lxmert"
UpperCamelCase__ = {}
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=9_500 , _a=1_600 , _a=400 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=9 , _a=5 , _a=5 , _a=2_048 , _a=4 , _a=6.67 , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , _a=True , **_a , ):
__magic_name__ : Dict = vocab_size
__magic_name__ : Dict = hidden_size
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : Dict = hidden_act
__magic_name__ : List[Any] = intermediate_size
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Tuple = type_vocab_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : Any = layer_norm_eps
__magic_name__ : List[Any] = num_qa_labels
__magic_name__ : List[str] = num_object_labels
__magic_name__ : List[Any] = num_attr_labels
__magic_name__ : Optional[Any] = l_layers
__magic_name__ : Optional[Any] = x_layers
__magic_name__ : Dict = r_layers
__magic_name__ : Optional[Any] = visual_feat_dim
__magic_name__ : Optional[int] = visual_pos_dim
__magic_name__ : Union[str, Any] = visual_loss_normalizer
__magic_name__ : int = task_matched
__magic_name__ : Tuple = task_mask_lm
__magic_name__ : str = task_obj_predict
__magic_name__ : Dict = task_qa
__magic_name__ : Tuple = visual_obj_loss
__magic_name__ : Optional[int] = visual_attr_loss
__magic_name__ : Dict = visual_feat_loss
__magic_name__ : List[str] = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
super().__init__(**lowerCamelCase__ )
| 281 |
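For reference, the public counterpart of this class is LxmertConfig; a minimal sketch, assuming the standard transformers API:

from transformers import LxmertConfig

config = LxmertConfig(hidden_size=768, l_layers=9, x_layers=5, r_layers=5)
# num_hidden_layers is the per-encoder dict built in __init__ above:
print(config.num_hidden_layers)  # {"vision": 5, "cross_encoder": 5, "language": 9}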
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = "table-transformer"
__snake_case : Union[str, Any] = ["past_key_values"]
__snake_case : List[Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : Optional[int]=100 ,lowerCamelCase__ : List[Any]=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : List[Any]=8 ,lowerCamelCase__ : Dict=6 ,lowerCamelCase__ : Dict=2048 ,lowerCamelCase__ : Any=8 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : int=0.0 ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Optional[int]="relu" ,lowerCamelCase__ : Tuple=256 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : Optional[Any]=0.0 ,lowerCamelCase__ : Tuple=0.0 ,lowerCamelCase__ : List[Any]=0.02 ,lowerCamelCase__ : int=1.0 ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Optional[Any]="sine" ,lowerCamelCase__ : List[str]="resnet50" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : Dict=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=5 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : str=0.1 ,**lowerCamelCase__ : List[str] ,) -> Optional[int]:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""" )
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase__ )
# set timm attributes to None
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = None, None, None
SCREAMING_SNAKE_CASE = use_timm_backbone
SCREAMING_SNAKE_CASE = backbone_config
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_queries
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = encoder_ffn_dim
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = encoder_attention_heads
SCREAMING_SNAKE_CASE = decoder_ffn_dim
SCREAMING_SNAKE_CASE = decoder_layers
SCREAMING_SNAKE_CASE = decoder_attention_heads
SCREAMING_SNAKE_CASE = dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = init_std
SCREAMING_SNAKE_CASE = init_xavier_std
SCREAMING_SNAKE_CASE = encoder_layerdrop
SCREAMING_SNAKE_CASE = decoder_layerdrop
SCREAMING_SNAKE_CASE = encoder_layers
SCREAMING_SNAKE_CASE = auxiliary_loss
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = backbone
SCREAMING_SNAKE_CASE = use_pretrained_backbone
SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
SCREAMING_SNAKE_CASE = class_cost
SCREAMING_SNAKE_CASE = bbox_cost
SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE = mask_loss_coefficient
SCREAMING_SNAKE_CASE = dice_loss_coefficient
SCREAMING_SNAKE_CASE = bbox_loss_coefficient
SCREAMING_SNAKE_CASE = giou_loss_coefficient
SCREAMING_SNAKE_CASE = eos_coefficient
super().__init__(is_encoder_decoder=lowerCamelCase__ ,**lowerCamelCase__ )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return self.d_model
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> float:
'''simple docstring'''
return 1e-5
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
'''simple docstring'''
return 12
| 296 | 0 |
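A minimal sketch of using the public TableTransformerConfig (assumed transformers API); the attribute_map defined above routes hidden_size to d_model:

from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=256, encoder_attention_heads=8)
print(config.hidden_size)          # 256, via attribute_map
print(config.num_attention_heads)  # 8, via attribute_map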
import math
def __snake_case ( _lowerCAmelCase : float , _lowerCAmelCase : float ) -> float:
if (
not isinstance(_lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def __snake_case ( _lowerCAmelCase : float , _lowerCAmelCase : float ) -> float:
if (
not isinstance(_lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 70 |
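A quick worked check of both formulas with the classic 3-4-5 power triangle: apparent power S = 100 VA at power factor 0.8 gives real power 80 W and reactive power 60 VAR.

import math

s, pf = 100.0, 0.8
assert s * pf == 80.0                              # real power, W
assert round(s * math.sqrt(1 - pf**2), 6) == 60.0  # reactive power, VAR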
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase = '''falcon'''
__UpperCamelCase = ['''past_key_values''']
def __init__( self :List[Any] , snake_case :Optional[int]=65_024 , snake_case :Tuple=4_544 , snake_case :Dict=32 , snake_case :Union[str, Any]=71 , snake_case :List[Any]=1e-5 , snake_case :Union[str, Any]=0.02 , snake_case :List[Any]=True , snake_case :Union[str, Any]=0.0 , snake_case :int=0.0 , snake_case :Union[str, Any]=None , snake_case :Dict=False , snake_case :int=False , snake_case :Tuple=True , snake_case :str=True , snake_case :List[Any]=False , snake_case :Optional[Any]=11 , snake_case :Tuple=11 , **snake_case :List[Any] , ):
'''simple docstring'''
A_ : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
A_ : Any = kwargs.pop("n_embed" , snake_case )
A_ : str = hidden_size if n_embed is None else n_embed
A_ : List[str] = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : List[str] = layer_norm_epsilon
A_ : Optional[Any] = initializer_range
A_ : Optional[int] = use_cache
A_ : str = hidden_dropout
A_ : str = attention_dropout
A_ : str = bos_token_id
A_ : List[str] = eos_token_id
A_ : Union[str, Any] = num_attention_heads if num_kv_heads is None else num_kv_heads
A_ : int = alibi
A_ : str = new_decoder_architecture
A_ : Dict = multi_query # Ignored when new_decoder_architecture is True
A_ : Any = parallel_attn
A_ : Optional[Any] = bias
super().__init__(bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
@property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return self.hidden_size // self.num_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
return not self.alibi
| 70 | 1 |
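A minimal sketch against the public FalconConfig (assumed transformers API), exercising the two derived properties defined above:

from transformers import FalconConfig

config = FalconConfig(hidden_size=4544, num_attention_heads=71)
assert config.head_dim == 4544 // 71        # 64, per the first property
assert config.rotary == (not config.alibi)  # per the second property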
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
SCREAMING_SNAKE_CASE__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
SCREAMING_SNAKE_CASE__ = tuple[int, int]
class lowercase :
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ) -> None:
lowerCAmelCase = pos_x
lowerCAmelCase = pos_y
lowerCAmelCase = (pos_y, pos_x)
lowerCAmelCase = goal_x
lowerCAmelCase = goal_y
lowerCAmelCase = g_cost
lowerCAmelCase = parent
lowerCAmelCase = self.calculate_heuristic()
lowerCAmelCase = self.g_cost + self.h_cost
def _snake_case ( self ) -> float:
lowerCAmelCase = self.pos_x - self.goal_x
lowerCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase ) + abs(lowercase )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , lowercase ) -> bool:
return self.f_cost < other.f_cost
class lowercase :
def __init__( self , lowercase , lowercase ) -> List[Any]:
lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowercase )
lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowercase )
lowerCAmelCase = [self.start]
lowerCAmelCase = []
lowerCAmelCase = False
def _snake_case ( self ) -> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase )
self.closed_nodes.append(lowercase )
lowerCAmelCase = self.get_successors(lowercase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase )
else:
# retrieve the best current path
lowerCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowercase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase )
else:
self.open_nodes.append(lowercase )
return [self.start.pos]
def _snake_case ( self , lowercase ) -> list[Node]:
lowerCAmelCase = []
for action in delta:
lowerCAmelCase = parent.pos_x + action[1]
lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase , lowercase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowercase , ) )
return successors
def _snake_case ( self , lowercase ) -> list[TPosition]:
lowerCAmelCase = node
lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCAmelCase = current_node.parent
path.reverse()
return path
class lowercase :
def __init__( self , lowercase , lowercase ) -> None:
lowerCAmelCase = AStar(lowercase , lowercase )
lowerCAmelCase = AStar(lowercase , lowercase )
lowerCAmelCase = False
def _snake_case ( self ) -> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowerCAmelCase = self.fwd_astar.open_nodes.pop(0 )
lowerCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase , lowercase )
self.fwd_astar.closed_nodes.append(lowercase )
self.bwd_astar.closed_nodes.append(lowercase )
lowerCAmelCase = current_bwd_node
lowerCAmelCase = current_fwd_node
lowerCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase )
else:
# retrieve the best current path
lowerCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(lowercase ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase )
else:
astar.open_nodes.append(lowercase )
return [self.fwd_astar.start.pos]
def _snake_case ( self , lowercase , lowercase ) -> list[TPosition]:
lowerCAmelCase = self.fwd_astar.retrace_path(lowercase )
lowerCAmelCase = self.bwd_astar.retrace_path(lowercase )
bwd_path.pop()
bwd_path.reverse()
lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE__ = (0, 0)
SCREAMING_SNAKE_CASE__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
SCREAMING_SNAKE_CASE__ = time.time()
SCREAMING_SNAKE_CASE__ = AStar(init, goal)
SCREAMING_SNAKE_CASE__ = a_star.search()
SCREAMING_SNAKE_CASE__ = time.time() - start_time
print(f'AStar execution time = {end_time:f} seconds')
SCREAMING_SNAKE_CASE__ = time.time()
    SCREAMING_SNAKE_CASE__ = BidirectionalAStar(init, goal)
    SCREAMING_SNAKE_CASE__ = bidir_astar.search()
    SCREAMING_SNAKE_CASE__ = time.time() - bd_start_time
print(f'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 46 |
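To make the HEURISTIC toggle concrete: from (0, 0) to (3, 4), the Manhattan estimate is |3| + |4| = 7 while the Euclidean estimate is sqrt(3^2 + 4^2) = 5; on this 4-connected grid both are admissible, Manhattan being the tighter bound.

from math import sqrt

dx, dy = 3, 4
print(abs(dx) + abs(dy))    # Manhattan: 7
print(sqrt(dx**2 + dy**2))  # Euclidean: 5.0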
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 42
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
@register_to_config
def __init__( self , lowercase = 3 , lowercase = 3 , lowercase = ("DownEncoderBlock2D",) , lowercase = ("UpDecoderBlock2D",) , lowercase = (64,) , lowercase = 1 , lowercase = "silu" , lowercase = 3 , lowercase = 32 , lowercase = 256 , lowercase = 32 , lowercase = None , lowercase = 0.18_215 , lowercase = "group" , ) -> Union[str, Any]:
super().__init__()
# pass init params to Encoder
lowerCAmelCase = Encoder(
in_channels=lowercase , out_channels=lowercase , down_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , double_z=lowercase , )
lowerCAmelCase = vq_embed_dim if vq_embed_dim is not None else latent_channels
        lowerCAmelCase = nn.Conv2d(lowercase , lowercase , 1 )
lowerCAmelCase = VectorQuantizer(lowercase , lowercase , beta=0.25 , remap=lowercase , sane_index_shape=lowercase )
        lowerCAmelCase = nn.Conv2d(lowercase , lowercase , 1 )
# pass init params to Decoder
lowerCAmelCase = Decoder(
in_channels=lowercase , out_channels=lowercase , up_block_types=lowercase , block_out_channels=lowercase , layers_per_block=lowercase , act_fn=lowercase , norm_num_groups=lowercase , norm_type=lowercase , )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = True ) -> VQEncoderOutput:
lowerCAmelCase = self.encoder(lowercase )
lowerCAmelCase = self.quant_conv(lowercase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase )
@apply_forward_hook
def _snake_case ( self , lowercase , lowercase = False , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self.quantize(lowercase )
else:
lowerCAmelCase = h
lowerCAmelCase = self.post_quant_conv(lowercase )
lowerCAmelCase = self.decoder(lowercase , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
def _snake_case ( self , lowercase , lowercase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
lowerCAmelCase = sample
lowerCAmelCase = self.encode(lowercase ).latents
lowerCAmelCase = self.decode(lowercase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase )
| 46 | 1 |
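A minimal round-trip sketch through the public diffusers VQModel (assumed API; tiny illustrative config):

import torch
from diffusers import VQModel

model = VQModel(in_channels=3, out_channels=3, latent_channels=4, num_vq_embeddings=16)
x = torch.randn(1, 3, 32, 32)
latents = model.encode(x).latents     # quant_conv output, pre-quantization
recon = model.decode(latents).sample  # quantize -> post_quant_conv -> decoder
print(recon.shape)                    # torch.Size([1, 3, 32, 32])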
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
__A = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
__A = {
"""169M""": 768,
"""430M""": 1024,
"""1B5""": 2048,
"""3B""": 2560,
"""7B""": 4096,
"""14B""": 5120,
}
def __A (_SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = list(state_dict.keys() )
for name in state_dict_keys:
lowerCAmelCase__ :List[str] = state_dict.pop(_SCREAMING_SNAKE_CASE )
# emb -> embedding
if name.startswith('emb.' ):
lowerCAmelCase__ :List[Any] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
lowerCAmelCase__ :Any = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
lowerCAmelCase__ :Any = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , _SCREAMING_SNAKE_CASE )
# ffn -> feed_forward
lowerCAmelCase__ :List[str] = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , _SCREAMING_SNAKE_CASE )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
lowerCAmelCase__ :Any = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
lowerCAmelCase__ :List[str] = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('.time_mix_r' ):
lowerCAmelCase__ :List[Any] = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
lowerCAmelCase__ :int = 'rwkv.' + name
lowerCAmelCase__ :Union[str, Any] = weight
return state_dict
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ) ->Tuple:
"""simple docstring"""
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
lowerCAmelCase__ :Any = 5_0277
lowerCAmelCase__ :Dict = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
lowerCAmelCase__ :Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Any = len(_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
# 2. Build the config
lowerCAmelCase__ :Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
lowerCAmelCase__ :Tuple = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
lowerCAmelCase__ :Union[str, Any] = RwkvConfig(
vocab_size=_SCREAMING_SNAKE_CASE , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_SCREAMING_SNAKE_CASE )
# 3. Download model file then convert state_dict
lowerCAmelCase__ :Optional[Any] = hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
lowerCAmelCase__ :Union[str, Any] = convert_state_dict(_SCREAMING_SNAKE_CASE )
# 4. Split in shards and save
lowerCAmelCase__ , lowerCAmelCase__ :str = shard_checkpoint(_SCREAMING_SNAKE_CASE )
for shard_file, shard in shards.items():
torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if index is not None:
lowerCAmelCase__ :List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Save the index as well
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
lowerCAmelCase__ :Dict = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + '\n'
f.write(_SCREAMING_SNAKE_CASE )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may error with an OOM error, if this is the case don\'t worry you still have converted the model.' )
lowerCAmelCase__ :Optional[Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
lowerCAmelCase__ :Union[str, Any] = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
lowerCAmelCase__ :List[Any] = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
model.push_to_hub(_SCREAMING_SNAKE_CASE , max_shard_size='2GB' )
tokenizer.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
__A = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 254 |
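A hypothetical invocation of the converter above; the repo and checkpoint file names are illustrative placeholders, not verified values.

# python convert_rwkv_checkpoint_to_hf.py \
#     --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#     --output_dir ./rwkv-169m-hf \
#     --size 169M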
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=3_2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_0 , __UpperCAmelCase=[1_0, 2_0, 3_0, 4_0] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = parent
lowerCAmelCase__ :Dict = batch_size
lowerCAmelCase__ :Optional[int] = image_size
lowerCAmelCase__ :Any = num_channels
lowerCAmelCase__ :Union[str, Any] = embeddings_size
lowerCAmelCase__ :Optional[int] = hidden_sizes
lowerCAmelCase__ :Optional[int] = depths
lowerCAmelCase__ :Tuple = is_training
lowerCAmelCase__ :Tuple = use_labels
lowerCAmelCase__ :str = hidden_act
lowerCAmelCase__ :List[Any] = num_labels
lowerCAmelCase__ :Union[str, Any] = scope
lowerCAmelCase__ :Any = len(__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ :Any = self.get_config()
return config, pixel_values
def snake_case ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = FlaxRegNetModel(config=__UpperCAmelCase )
lowerCAmelCase__ :List[str] = model(__UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.num_labels
lowerCAmelCase__ :int = FlaxRegNetForImageClassification(config=__UpperCAmelCase )
lowerCAmelCase__ :Any = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = config_and_inputs
lowerCAmelCase__ :Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :List[str] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__magic_name__ :Any = False
__magic_name__ :int = False
__magic_name__ :Any = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = FlaxRegNetModelTester(self )
lowerCAmelCase__ :Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
'''simple docstring'''
return
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :Optional[int] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :str = [*signature.parameters.keys()]
lowerCAmelCase__ :Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase__ :List[Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :Dict = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ :List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ :Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ :Optional[Any] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ :Tuple = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = model_class(__UpperCAmelCase )
@jax.jit
def model_jitted(__UpperCAmelCase , **__UpperCAmelCase ):
return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase )
with self.subTest('JIT Enabled' ):
lowerCAmelCase__ :Dict = model_jitted(**__UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase__ :Union[str, Any] = model_jitted(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __A () ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
lowerCAmelCase__ :Any = self.default_image_processor
lowerCAmelCase__ :Dict = prepare_img()
lowerCAmelCase__ :Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='np' )
lowerCAmelCase__ :List[str] = model(**__UpperCAmelCase )
# verify the logits
lowerCAmelCase__ :Union[str, Any] = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
lowerCAmelCase__ :Any = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 254 | 1 |
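A sketch mirroring the slow test above as standalone inference (the checkpoint name comes from the test; the image path is a placeholder):

from PIL import Image
from transformers import AutoImageProcessor, FlaxRegNetForImageClassification

model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
inputs = processor(images=Image.open("cat.png"), return_tensors="np")  # placeholder path
logits = model(**inputs).logits  # shape (1, 1000)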
A__ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
A__ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = True
_lowerCAmelCase = []
for neighbour in graph[vert]:
if not visited[neighbour]:
order += topology_sort(snake_case , snake_case , snake_case )
order.append(snake_case )
return order
def _UpperCAmelCase ( snake_case , snake_case , snake_case ):
"""simple docstring"""
_lowerCAmelCase = True
_lowerCAmelCase = [vert]
for neighbour in reversed_graph[vert]:
if not visited[neighbour]:
component += find_components(snake_case , snake_case , snake_case )
return component
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = len(snake_case ) * [False]
_lowerCAmelCase = {vert: [] for vert in range(len(snake_case ) )}
for vert, neighbours in graph.items():
for neighbour in neighbours:
reversed_graph[neighbour].append(snake_case )
_lowerCAmelCase = []
for i, was_visited in enumerate(snake_case ):
if not was_visited:
order += topology_sort(snake_case , snake_case , snake_case )
_lowerCAmelCase = []
_lowerCAmelCase = len(snake_case ) * [False]
for i in range(len(snake_case ) ):
_lowerCAmelCase = order[len(snake_case ) - i - 1]
if not visited[vert]:
_lowerCAmelCase = find_components(snake_case , snake_case , snake_case )
components_list.append(snake_case )
return components_list
| 82 |
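For the first test graph above (edges 0->2, 0->3, 1->0, 2->1, 3->4), Kosaraju finds three strongly connected components: {0, 1, 2} (the 0->2->1->0 cycle), {3}, and {4}. A self-contained sketch of the same two-pass idea, with illustrative names:

def kosaraju(graph):
    # First pass: DFS finishing order on the original graph.
    n = len(graph)
    order, seen = [], [False] * n
    def dfs(v):
        seen[v] = True
        for w in graph[v]:
            if not seen[w]:
                dfs(w)
        order.append(v)
    for v in range(n):
        if not seen[v]:
            dfs(v)
    # Second pass: collect components on the reversed graph, in reverse order.
    rev = {v: [] for v in range(n)}
    for v, ws in graph.items():
        for w in ws:
            rev[w].append(v)
    comps, done = [], [False] * n
    def collect(v, comp):
        done[v] = True
        comp.append(v)
        for w in rev[v]:
            if not done[w]:
                collect(w, comp)
    for v in reversed(order):
        if not done[v]:
            comp = []
            collect(v, comp)
            comps.append(comp)
    return comps

print(kosaraju({0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}))  # [[0, 1, 2], [3], [4]]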
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = AudioLDMPipeline
__lowerCamelCase = TEXT_TO_AUDIO_PARAMS
__lowerCamelCase = TEXT_TO_AUDIO_BATCH_PARAMS
__lowerCamelCase = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def snake_case ( self ):
"""simple docstring"""
torch.manual_seed(0 )
        _lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_snake_case , )
_lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0 )
_lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
_lowerCAmelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
_lowerCAmelCase = ClapTextModelWithProjection(_snake_case )
_lowerCAmelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
        _lowerCAmelCase = SpeechT5HifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_snake_case , )
        _lowerCAmelCase = SpeechT5HifiGan(_snake_case )
_lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def snake_case ( self , _snake_case , _snake_case=0 ):
"""simple docstring"""
if str(_snake_case ).startswith("""mps""" ):
_lowerCAmelCase = torch.manual_seed(_snake_case )
else:
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * ["""this is a negative prompt"""]
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs["""prompt"""]]
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase = audioldm_pipe.tokenizer(
_snake_case , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_snake_case , return_tensors="""pt""" , )
_lowerCAmelCase = text_inputs["""input_ids"""].to(_snake_case )
_lowerCAmelCase = audioldm_pipe.text_encoder(
_snake_case , )
_lowerCAmelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_lowerCAmelCase = F.normalize(_snake_case , dim=-1 )
embeds.append(_snake_case )
_lowerCAmelCase , _lowerCAmelCase = embeds
# forward
_lowerCAmelCase = audioldm_pipe(**_snake_case )
_lowerCAmelCase = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = """egg cracking"""
_lowerCAmelCase = audioldm_pipe(**_snake_case , negative_prompt=_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 256
_lowerCAmelCase = audio[:10]
_lowerCAmelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = PNDMScheduler(skip_prk_steps=_snake_case )
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
_lowerCAmelCase = 2
_lowerCAmelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_snake_case ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = audioldm_pipe.vocoder.config.sampling_rate
_lowerCAmelCase = self.get_dummy_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.016 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.016
_lowerCAmelCase = audioldm_pipe(audio_length_in_s=0.032 , **_snake_case )
_lowerCAmelCase = output.audios[0]
assert audio.ndim == 1
assert len(_snake_case ) / vocoder_sampling_rate == 0.032
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = AudioLDMPipeline(**_snake_case )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = ["""hey"""]
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
assert audio_shape == (1, 256)
_lowerCAmelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
        _lowerCAmelCase = SpeechT5HifiGan(_snake_case ).to(_snake_case )
_lowerCAmelCase = audioldm_pipe(_snake_case , num_inference_steps=1 )
_lowerCAmelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def snake_case ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_snake_case )
def snake_case ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_snake_case )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def snake_case ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_snake_case )
@slow
class __lowerCAmelCase ( unittest.TestCase ):
def snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self , _snake_case , _snake_case="cpu" , _snake_case=torch.floataa , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
_lowerCAmelCase = np.random.RandomState(_snake_case ).standard_normal((1, 8, 128, 16) )
_lowerCAmelCase = torch.from_numpy(_snake_case ).to(device=_snake_case , dtype=_snake_case )
_lowerCAmelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = 25
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[77230:77240]
_lowerCAmelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_lowerCAmelCase = audioldm_pipe.to(_snake_case )
audioldm_pipe.set_progress_bar_config(disable=_snake_case )
_lowerCAmelCase = self.get_inputs(_snake_case )
_lowerCAmelCase = audioldm_pipe(**_snake_case ).audios[0]
assert audio.ndim == 1
assert len(_snake_case ) == 81920
_lowerCAmelCase = audio[27780:27790]
_lowerCAmelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_lowerCAmelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 82 | 1 |
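A sketch of end-to-end use of the pipeline exercised above; the checkpoint name comes from the slow test, the rest assumes the standard diffusers API and a CUDA device:

import torch
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,
    audio_length_in_s=5.0,
).audios[0]
print(audio.shape)  # 1-D waveform at pipe.vocoder.config.sampling_rate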
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = str(id_ )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = []
UpperCAmelCase__ = {} # {vertex:distance}
def __lt__( self : Tuple , _UpperCAmelCase : str ):
"""simple docstring"""
return self.key < other.key
def __repr__( self : int ):
"""simple docstring"""
return self.id
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : List[str] ):
"""simple docstring"""
self.neighbors.append(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ):
"""simple docstring"""
UpperCAmelCase__ = weight
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , SCREAMING_SNAKE_CASE__ )
graph[b - 1].add_edge(graph[a - 1] , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : Vertex ):
'''simple docstring'''
UpperCAmelCase__ = []
for u in graph:
UpperCAmelCase__ = math.inf
UpperCAmelCase__ = None
UpperCAmelCase__ = 0
UpperCAmelCase__ = graph[:]
while q:
UpperCAmelCase__ = min(SCREAMING_SNAKE_CASE__ )
q.remove(SCREAMING_SNAKE_CASE__ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase__ = u
UpperCAmelCase__ = u.edges[v.id]
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : Vertex ):
'''simple docstring'''
for u in graph:
UpperCAmelCase__ = math.inf
UpperCAmelCase__ = None
UpperCAmelCase__ = 0
UpperCAmelCase__ = list(SCREAMING_SNAKE_CASE__ )
hq.heapify(SCREAMING_SNAKE_CASE__ )
while h:
UpperCAmelCase__ = hq.heappop(SCREAMING_SNAKE_CASE__ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase__ = u
UpperCAmelCase__ = u.edges[v.id]
hq.heapify(SCREAMING_SNAKE_CASE__ )
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _UpperCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
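A self-contained sketch of the same greedy idea in lazy, edge-based form with heapq (the heap variant above instead keeps per-vertex keys and re-heapifies after each relaxation); graph and names are illustrative.

import heapq

def prim(adj, start):
    mst, seen = [], {start}
    heap = [(w, start, v) for v, w in adj[start].items()]
    heapq.heapify(heap)
    while heap and len(seen) < len(adj):
        w, u, v = heapq.heappop(heap)
        if v in seen:  # stale edge into the tree
            continue
        seen.add(v)
        mst.append((u, v, w))
        for nxt, wn in adj[v].items():
            if nxt not in seen:
                heapq.heappush(heap, (wn, v, nxt))
    return mst

adj = {"a": {"b": 1, "c": 4}, "b": {"a": 1, "c": 2}, "c": {"a": 4, "b": 2}}
print(prim(adj, "a"))  # [('a', 'b', 1), ('b', 'c', 2)]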
'''simple docstring'''
import enum
import shutil
import sys
UpperCAmelCase_ , UpperCAmelCase_ = shutil.get_terminal_size()
UpperCAmelCase_ = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class lowerCAmelCase_ ( enum.Enum ):
'''simple docstring'''
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Union[str, Any] = 1
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]="" ):
'''simple docstring'''
sys.stdout.write(str(SCREAMING_SNAKE_CASE__ ) + end )
sys.stdout.flush()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int="" ):
'''simple docstring'''
forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( ):
'''simple docstring'''
forceWrite("""\r""" )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
'''simple docstring'''
forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def _UpperCamelCase ( ):
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def _UpperCamelCase ( ):
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 61 | 1 |
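A minimal demo of the ANSI conventions the helpers above rely on: "\u001b[{color}m" opens an SGR attribute (32 = green) and "\u001b[0m" resets it.

import sys

sys.stdout.write("\u001b[32mready\u001b[0m\n")  # green "ready", then reset
sys.stdout.flush()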
'''simple docstring'''
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
while second != 0:
__lowercase =first & second
first ^= second
__lowercase =c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase = int(input("""Enter the first number: """).strip())
lowerCamelCase = int(input("""Enter the second number: """).strip())
print(f"{add(first, second) = }")
| 166 |
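The same loop with readable names, plus a trace of add(5, 3): carry 1 -> (6, 2), carry 2 -> (4, 4), carry 4 -> (0, 8), carry 0 -> (8, 0). With Python's unbounded ints this terminates for non-negative operands.

def add(first, second):
    while second != 0:
        carry = first & second  # bits that overflow
        first ^= second         # sum without carry
        second = carry << 1     # propagate the carry one position left
    return first

assert add(5, 3) == 8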
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
lowerCamelCase = (
"""This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"""
)
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
return (preds == labels).mean()
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
    __lowercase =simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )
    __lowercase =f1_score(y_true=_lowerCAmelCase , y_pred=_lowerCAmelCase )
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def _A ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
__lowercase =pearsonr(_lowerCAmelCase , _lowerCAmelCase )[0]
__lowercase =spearmanr(_lowerCAmelCase , _lowerCAmelCase )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
assert len(_lowerCAmelCase ) == len(_lowerCAmelCase ), f"""Predictions and labels have mismatched lengths {len(_lowerCAmelCase )} and {len(_lowerCAmelCase )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "mrpc":
        return acc_and_f1(_lowerCAmelCase , _lowerCAmelCase )
elif task_name == "sts-b":
return pearson_and_spearman(_lowerCAmelCase , _lowerCAmelCase )
elif task_name == "qqp":
        return acc_and_f1(_lowerCAmelCase , _lowerCAmelCase )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "qnli":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "rte":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "wnli":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
elif task_name == "hans":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
else:
raise KeyError(_lowerCAmelCase )
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
warnings.warn(_lowerCAmelCase , _lowerCAmelCase )
requires_backends(_lowerCAmelCase , 'sklearn' )
if len(_lowerCAmelCase ) != len(_lowerCAmelCase ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(_lowerCAmelCase )} and {len(_lowerCAmelCase )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(_lowerCAmelCase , _lowerCAmelCase )}
else:
raise KeyError(_lowerCAmelCase )
| 166 | 1 |
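A toy check of the acc/F1 combination above (sklearn assumed installed):

import numpy as np
from sklearn.metrics import f1_score

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()              # 0.75
f1 = f1_score(y_true=labels, y_pred=preds)  # 0.8
print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})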
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ = " " ):
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Optional[int] = 0
for index, char in enumerate(UpperCamelCase__ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase__ : Union[str, Any] = index + 1
elif index + 1 == len(UpperCamelCase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod() | 359 |
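Behavior note for the scanner above: interior separators match str.split, but a trailing separator's empty final segment is dropped by the elif.

assert "a,b,c".split(",") == ["a", "b", "c"]  # the loop above agrees here
assert "ab,".split(",") == ["ab", ""]         # the loop above returns ["ab"]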
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__A =logging.get_logger(__name__)
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=False ):
UpperCAmelCase__ : str = """backbone.""" if is_semantic else """"""
UpperCAmelCase__ : Optional[Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', """beit.embeddings.cls_token"""),
(f'''{prefix}patch_embed.proj.weight''', """beit.embeddings.patch_embeddings.projection.weight"""),
(f'''{prefix}patch_embed.proj.bias''', """beit.embeddings.patch_embeddings.projection.bias"""),
(f'''{prefix}pos_embed''', """beit.embeddings.position_embeddings"""),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("""mask_token""", """beit.embeddings.mask_token"""),
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("""fc_norm.weight""", """beit.pooler.layernorm.weight"""),
("""fc_norm.bias""", """beit.pooler.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''')

        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''')
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''')
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
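# Added invocation sketch (hedged): the conversion can also be driven from
# Python instead of the CLI below. The checkpoint URL mirrors the argparse
# default; the dump path is illustrative only.
#
#   convert_dit_checkpoint(
#       checkpoint_url="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
#       pytorch_dump_folder_path="./dit-base",
#       push_to_hub=False,
#   )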
if __name__ == "__main__":
__A =argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
__A =parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 | 0 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors(
    compression_format, is_archive, bza_file, gz_file, lza_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    '''simple docstring'''
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    assert base_extractor.is_extractable(input_path)
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor(
    compression_format, is_archive, bza_file, gz_file, lza_file, seven_zip_file, tar_file, xz_file, zip_file, zstd_file, tmp_path, text_file, ):
    '''simple docstring'''
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"for '{compression_format}' compression_format, "
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    extractor_format = Extractor.infer_extractor_format(input_path)
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path, extractor_format, output_path)
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8")
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8")
    expected_file_content = text_file.read_text(encoding="utf-8")
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    '''simple docstring'''
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.join("..", text_file.name))
    return path


@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    '''simple docstring'''
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink("..", directory / "subdir", target_is_directory=True)
    with tarfile.TarFile(path, "w") as f:
        f.add(str(directory / "subdir"), arcname="subdir")  # str required by os.readlink on Windows and Python < 3.8
    return path


@pytest.mark.parametrize(
    "insecure_tar_file, error_log", [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")], )
def test_extract_tar_with_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog):
    '''simple docstring'''
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / "extracted"
    TarExtractor.extract(input_path, output_path)
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg


def test_is_zipfile_false_positive(tmpdir):
    '''simple docstring'''
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        b"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        b"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        b"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb") as f:
        f.write(data)
    assert zipfile.is_zipfile(str(not_a_zip_file))  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file)  # but we're right
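# Added note (hedged, not in the original test file): `zipfile.is_zipfile`
# searches the tail of the file for an end-of-central-directory record, so the
# PNG above -- which happens to embed the b"PK\x05\x06" marker -- is misdetected.
# A stricter check that assumes a real archive starts with the local-file-header
# magic might look like this (sketch only; `ZipExtractor` may implement it
# differently internally):
def _looks_like_zip(path):
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"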
| 136 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!")

    def _rope_scaling_validation(self):
        """simple docstring"""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
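# Added usage sketch (hedged): constructing the config directly exercises the
# validation above; an out-of-range rope_scaling factor raises immediately.
#
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # ok
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 0.5})  # ValueError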
| 136 | 1 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer.")

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )
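# Added usage sketch (hedged): the multi-vector workflow from textual inversion.
# The model name and placeholder token below are illustrative only.
#
#   tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#   tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#   ids = tokenizer("a photo of <cat-toy>")["input_ids"]
#
# "<cat-toy>" is expanded to "<cat-toy>_0 ... <cat-toy>_3" before encoding.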
| 353 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self):
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self):
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test(self):
        '''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 97 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    """simple docstring"""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="Evaluation", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="Prediction", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
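# Added usage sketch (hedged): wiring the trainer into a QA fine-tuning script.
# `post_processing_function` and `compute_metrics` stand in for the usual
# squad-style helpers and are assumptions, not part of this file.
#
#   trainer = QuestionAnsweringSeqaSeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=128, num_beams=4)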
| 335 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """simple docstring"""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """simple docstring"""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """simple docstring"""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """simple docstring"""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """simple docstring"""
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """simple docstring"""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """simple docstring"""
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
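# Added worked example (hedged): compare_string merges terms that differ in
# exactly one position, and decimal_to_binary keeps the float minterms it is
# handed, so each "digit" renders with a trailing ".0":
#
#   >>> compare_string("0010", "0110")
#   '0_10'
#   >>> compare_string("0110", "1101")
#   False
#   >>> decimal_to_binary(3, [5.0])
#   ['1.00.01.0']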
| 234 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 234 | 1 |
'''simple docstring'''
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)
if __name__ == "__main__":
main()
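# Added usage sketch (hedged): the right bound is exclusive, so pass len(a).
#
#   data = [3, 1, 4, 1, 5, 9, 2, 6]
#   quick_sort_random(data, 0, len(data))
#   assert data == sorted([3, 1, 4, 1, 5, 9, 2, 6])
#
# Randomizing the pivot keeps the expected running time at O(n log n) even on
# already-sorted input, where a fixed leftmost pivot would degrade to O(n^2).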
| 58 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 58 | 1 |
from __future__ import annotations
def generate_all_combinations(n, k):
    total_list = []
    create_all_state(1, n, k, [], total_list)
    return total_list


def create_all_state(increment, total_number, level, current_list, total_list):
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list):
    for i in total_list:
        print(*i)
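# Added worked example (hedged): generate_all_combinations(4, 2) enumerates the
# C(4, 2) = 6 two-element subsets of {1, 2, 3, 4} in lexicographic order:
# [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]. The backtracking append/pop
# pair keeps current_list shared across recursive calls, which is why a copy
# (current_list[:]) is stored at each leaf.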
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
| 355 |
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    '''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                } ), )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist}
| 325 | 0 |
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """simple docstring"""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
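# Added example (hedged): pairs of "--flag value" tokens become a kwargs dict,
# with values left as strings for the command to interpret.
#
#   >>> parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp/hf"])
#   {'num_proc': '4', 'cache_dir': '/tmp/hf'}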
def main():
    """simple docstring"""
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
| 210 |
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE =logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print('Loading config file...')

    def flatten_yaml_as_dict(d, parent_key='', sep='.'):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, 'r') as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith('imagenet1k_'):
        config.num_labels = 1000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_'):
        config.num_labels = 21000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_'):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_'):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, 'model.classification.name', -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, 'model.classification.mitv2.width_multiplier', 1.0)
    assert (
        getattr(orig_config, 'model.classification.mitv2.attn_norm_layer', -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, 'model.classification.activation.name', 'swish')
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, 'model.segmentation.output_stride', 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_rates', [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_out_channels', 512)
            config.aspp_dropout_prob = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_dropout', 0.1)

    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ''
    else:
        model_prefix = 'mobilevitv2.'

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace('.block.', '.')
        if ".conv." in k:
            k_new = k_new.replace('.conv.', '.convolution.')
        if ".norm." in k:
            k_new = k_new.replace('.norm.', '.normalization.')

        if "conv_1." in k:
            k_new = k_new.replace('conv_1.', f'{model_prefix}conv_stem.')
        for i in [1, 2]:
            if f'layer_{i}.' in k:
                k_new = k_new.replace(f'layer_{i}.', f'{model_prefix}encoder.layer.{i-1}.layer.')
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.', '.expand_1x1.')
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.', '.reduce_1x1.')

        for i in [3, 4, 5]:
            if f'layer_{i}.0.' in k:
                k_new = k_new.replace(f'layer_{i}.0.', f'{model_prefix}encoder.layer.{i-1}.downsampling_layer.')
            if f'layer_{i}.1.local_rep.0.' in k:
                k_new = k_new.replace(f'layer_{i}.1.local_rep.0.', f'{model_prefix}encoder.layer.{i-1}.conv_kxk.')
            if f'layer_{i}.1.local_rep.1.' in k:
                k_new = k_new.replace(f'layer_{i}.1.local_rep.1.', f'{model_prefix}encoder.layer.{i-1}.conv_1x1.')

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if f'layer_{i}.1.global_rep.{j}.' in k:
                    k_new = k_new.replace(
                        f'layer_{i}.1.global_rep.{j}.', f'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.')
                if f'layer_{i}.1.global_rep.{j+1}.' in k:
                    k_new = k_new.replace(
                        f'layer_{i}.1.global_rep.{j+1}.', f'{model_prefix}encoder.layer.{i-1}.layernorm.')
            if f'layer_{i}.1.conv_proj.' in k:
                k_new = k_new.replace(f'layer_{i}.1.conv_proj.', f'{model_prefix}encoder.layer.{i-1}.conv_projection.')

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.', 'layernorm_before.')
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.', 'attention.')
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.')
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.')
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.')
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.', 'classifier.')

        if "seg_head." in k:
            k_new = k_new.replace('seg_head.', 'segmentation_head.')
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.', '.')
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.', '.')

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.'):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    # load huggingface model
    if task_name.startswith('ade20k_') or task_name.startswith('voc_'):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith('imagenet'):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
        if task_name.startswith('imagenet1k_256') and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {task_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 213 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase__ : List[Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> int:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase__ : int = ''
else:
lowerCAmelCase__ : Optional[Any] = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Tuple = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
lowerCAmelCase__ : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase__ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase__ : Dict = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : Tuple = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Tuple = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowerCAmelCase__ : List[str] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Optional[int] = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Tuple = val
def lowerCAmelCase__ ( ) -> Optional[int]:
lowerCAmelCase__ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase__ : List[str] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Union[str, Any]:
lowerCAmelCase__ : Union[str, Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowerCAmelCase__ : Optional[int] = 8
# set labels if required
if not base_model:
lowerCAmelCase__ : Union[str, Any] = 1_000
lowerCAmelCase__ : str = 'huggingface/label-files'
lowerCAmelCase__ : Tuple = 'imagenet-1k-id2label.json'
lowerCAmelCase__ : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='dataset' ) , 'r' ) )
lowerCAmelCase__ : List[Any] = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = idalabel
lowerCAmelCase__ : Any = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowerCAmelCase__ : Any = 384
lowerCAmelCase__ : Optional[Any] = 1_536
lowerCAmelCase__ : Union[str, Any] = 12
lowerCAmelCase__ : Optional[Any] = 6
# load original model from torch hub
lowerCAmelCase__ : List[Any] = torch.hub.load('facebookresearch/dino:main' , SCREAMING_SNAKE_CASE_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase__ : Dict = original_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
if base_model:
lowerCAmelCase__ : Union[str, Any] = ViTModel(SCREAMING_SNAKE_CASE_ , add_pooling_layer=SCREAMING_SNAKE_CASE_ ).eval()
else:
lowerCAmelCase__ : Tuple = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by ViTImageProcessor
lowerCAmelCase__ : Tuple = ViTImageProcessor()
lowerCAmelCase__ : Union[str, Any] = image_processor(images=prepare_img() , return_tensors='pt' )
lowerCAmelCase__ : str = encoding['pixel_values']
lowerCAmelCase__ : str = model(SCREAMING_SNAKE_CASE_ )
if base_model:
lowerCAmelCase__ : Dict = original_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
lowerCAmelCase__ : Any = original_model(SCREAMING_SNAKE_CASE_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
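    # Example invocation (illustrative; the script name and output path are placeholders):
    #   python convert_dino_vit.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino-vitb16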
lowerCamelCase__ = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 307 |
import torch
from torch import nn
class A__ ( nn.Module ):
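    # Adaptive log-softmax with per-cluster projections (the Transformer-XL scheme):
    # the vocabulary is split at `cutoffs` into a frequent head plus rarer tail clusters,
    # and with div_val > 1 each tail cluster uses a smaller embedding dim (d_embed // div_val**i).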
def __init__( self : Optional[int] , a : Union[str, Any] , a : str , a : str , a : List[Any] , a : List[Any]=1 , a : Tuple=False ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Dict = n_token
lowerCAmelCase__ : Any = d_embed
lowerCAmelCase__ : str = d_proj
lowerCAmelCase__ : int = cutoffs + [n_token]
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
lowerCAmelCase__ : str = div_val
lowerCAmelCase__ : Tuple = self.cutoffs[0]
lowerCAmelCase__ : Dict = len(self.cutoffs ) - 1
lowerCAmelCase__ : Any = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
lowerCAmelCase__ : int = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.zeros(self.n_clusters ) )
lowerCAmelCase__ : Optional[int] = nn.ModuleList()
lowerCAmelCase__ : Tuple = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
else:
self.out_projs.append(a )
self.out_layers.append(nn.Linear(a , a ) )
else:
for i in range(len(self.cutoffs ) ):
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Optional[Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a , a ) ) )
self.out_layers.append(nn.Linear(a , r_idx - l_idx ) )
lowerCAmelCase__ : Tuple = keep_order
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : int , a : List[str] , a : str ):
'''simple docstring'''
if proj is None:
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase__ : int = nn.functional.linear(a , proj.t().contiguous() )
lowerCAmelCase__ : Tuple = nn.functional.linear(a , a , bias=a )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : Optional[int]=None , a : Tuple=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase__ : str = hidden[..., :-1, :].contiguous()
lowerCAmelCase__ : Optional[Any] = labels[..., 1:].contiguous()
lowerCAmelCase__ : List[Any] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase__ : Tuple = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('Input and labels should have the same size in the batch dimension.' )
else:
lowerCAmelCase__ : Optional[Any] = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase__ : Optional[Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase__ : str = labels != -100
lowerCAmelCase__ : int = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : List[str] = (
-nn.functional.log_softmax(a , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : Any = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Any = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : Optional[Any] = self.out_layers[i].weight
lowerCAmelCase__ : Optional[int] = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Dict = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : List[Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Union[str, Any] = nn.functional.log_softmax(a , dim=1 )
if labels is None:
lowerCAmelCase__ : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase__ : Dict = torch.zeros_like(a , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Union[str, Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase__ : Tuple = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase__ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase__ : Tuple = labels.index_select(0 , a ) - l_idx
lowerCAmelCase__ : Any = head_logprob.index_select(0 , a )
lowerCAmelCase__ : Optional[int] = hidden.index_select(0 , a )
else:
lowerCAmelCase__ : Any = hidden
if i == 0:
if labels is not None:
lowerCAmelCase__ : Union[str, Any] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : Optional[int] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase__ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase__ : Tuple = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase__ : Union[str, Any] = logprob_i
if labels is not None:
if (hasattr(self , 'keep_order' ) and self.keep_order) or keep_order:
out.index_copy_(0 , a , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def _lowerCamelCase ( self : List[Any] , a : Any ):
'''simple docstring'''
if self.n_clusters == 0:
lowerCAmelCase__ : Union[str, Any] = self._compute_logit(a , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(a , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase__ , lowerCAmelCase__ : str = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase__ : str = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase__ : Dict = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase__ : int = self.out_layers[i].weight
lowerCAmelCase__ : int = self.out_layers[i].bias
if i == 0:
lowerCAmelCase__ : Optional[int] = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase__ : Union[str, Any] = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(a )
biases.append(a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase__ : Optional[Any] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : List[Any] = [0] + self.cutoffs
for i in range(len(a ) - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ : str = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase__ : Union[str, Any] = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase__ : Dict = self._compute_logit(a , a , a , a )
lowerCAmelCase__ : List[str] = nn.functional.log_softmax(a , dim=1 )
lowerCAmelCase__ : Dict = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase__ : List[str] = logprob_i
        return out
| 307 | 1 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
lowerCAmelCase = (7_20, 12_80) # Height, Width
lowerCAmelCase = (0.4, 0.6)  # range from which the random mosaic dividing point is sampled
lowerCAmelCase = 1 / 1_00  # drop bounding boxes whose height or width falls below this scale
lowerCAmelCase = """"""
lowerCAmelCase = """"""
lowerCAmelCase = """"""
lowerCAmelCase = 2_50
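# Note: the three empty-string constants above are the label, image and output directory
# paths; they must be set to real paths before this script is run.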
def lowerCAmelCase_ ( ) ->List[Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =get_dataset(snake_case_ , snake_case_ )
for index in range(snake_case_ ):
lowerCamelCase__ : Optional[int] =random.sample(range(len(snake_case_ ) ) , 4 )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] =update_image_and_anno(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , filter_scale=snake_case_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCamelCase__ : Optional[int] =random_chars(3_2 )
lowerCamelCase__ : str =path.split(os.sep )[-1].rsplit('.' , 1 )[0]
lowerCamelCase__ : int =f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , snake_case_ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
lowerCamelCase__ : Tuple =[]
for anno in new_annos:
lowerCamelCase__ : Tuple =anno[3] - anno[1]
lowerCamelCase__ : int =anno[4] - anno[2]
lowerCamelCase__ : Dict =anno[1] + width / 2
lowerCamelCase__ : Union[str, Any] =anno[2] + height / 2
lowerCamelCase__ : Tuple =f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(snake_case_ )
with open(f"""{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : List[str] ) ->Optional[int]:
lowerCamelCase__ : Dict =[]
lowerCamelCase__ : List[Any] =[]
for label_file in glob.glob(os.path.join(snake_case_ , '*.txt' ) ):
lowerCamelCase__ : int =label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(snake_case_ ) as in_file:
lowerCamelCase__ : Dict =in_file.readlines()
lowerCamelCase__ : List[str] =os.path.join(snake_case_ , f"""{label_name}.jpg""" )
lowerCamelCase__ : Dict =[]
for obj_list in obj_lists:
lowerCamelCase__ : List[Any] =obj_list.rstrip('\n' ).split(' ' )
lowerCamelCase__ : Optional[Any] =float(obj[1] ) - float(obj[3] ) / 2
lowerCamelCase__ : Optional[Any] =float(obj[2] ) - float(obj[4] ) / 2
lowerCamelCase__ : Dict =float(obj[1] ) + float(obj[3] ) / 2
lowerCamelCase__ : Tuple =float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(snake_case_ )
labels.append(snake_case_ )
return img_paths, labels
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Dict , snake_case_ : Dict = 0.0 , ) ->List[str]:
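    # Stitch four source images into one output-size mosaic around a randomly sampled
    # dividing point, rescaling each image's bounding boxes into the combined frame.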
lowerCamelCase__ : List[str] =np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowerCamelCase__ : Union[str, Any] =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCamelCase__ : int =scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCamelCase__ : Union[str, Any] =int(scale_x * output_size[1] )
lowerCamelCase__ : Dict =int(scale_y * output_size[0] )
lowerCamelCase__ : Tuple =[]
lowerCamelCase__ : int =[]
for i, index in enumerate(snake_case_ ):
lowerCamelCase__ : Dict =all_img_list[index]
path_list.append(snake_case_ )
lowerCamelCase__ : Optional[int] =all_annos[index]
lowerCamelCase__ : Dict =cva.imread(snake_case_ )
if i == 0: # top-left
lowerCamelCase__ : int =cva.resize(snake_case_ , (divid_point_x, divid_point_y) )
lowerCamelCase__ : Optional[int] =img
for bbox in img_annos:
lowerCamelCase__ : Dict =bbox[1] * scale_x
lowerCamelCase__ : List[str] =bbox[2] * scale_y
lowerCamelCase__ : int =bbox[3] * scale_x
lowerCamelCase__ : int =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowerCamelCase__ : Optional[int] =cva.resize(snake_case_ , (output_size[1] - divid_point_x, divid_point_y) )
lowerCamelCase__ : List[Any] =img
for bbox in img_annos:
lowerCamelCase__ : Union[str, Any] =scale_x + bbox[1] * (1 - scale_x)
lowerCamelCase__ : Tuple =bbox[2] * scale_y
lowerCamelCase__ : List[Any] =scale_x + bbox[3] * (1 - scale_x)
lowerCamelCase__ : Dict =bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowerCamelCase__ : Dict =cva.resize(snake_case_ , (divid_point_x, output_size[0] - divid_point_y) )
lowerCamelCase__ : Union[str, Any] =img
for bbox in img_annos:
lowerCamelCase__ : List[str] =bbox[1] * scale_x
lowerCamelCase__ : Dict =scale_y + bbox[2] * (1 - scale_y)
lowerCamelCase__ : Tuple =bbox[3] * scale_x
lowerCamelCase__ : List[str] =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowerCamelCase__ : int =cva.resize(
snake_case_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowerCamelCase__ : Tuple =img
for bbox in img_annos:
lowerCamelCase__ : List[str] =scale_x + bbox[1] * (1 - scale_x)
lowerCamelCase__ : Dict =scale_y + bbox[2] * (1 - scale_y)
lowerCamelCase__ : str =scale_x + bbox[3] * (1 - scale_x)
lowerCamelCase__ : Any =scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowerCamelCase__ : Tuple =[
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCAmelCase_ ( snake_case_ : Optional[int] ) ->Union[str, Any]:
    assert number_char > 1, "The number of characters should be greater than 1"
lowerCamelCase__ : Tuple =ascii_lowercase + digits
return "".join(random.choice(snake_case_ ) for _ in range(snake_case_ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 126 |
'''simple docstring'''
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __magic_name__( lowerCamelCase=None, lowerCamelCase=None):
return field(default_factory=lambda: default, metadata=lowerCamelCase)
@dataclass
class a__ :
"""simple docstring"""
__UpperCamelCase : str = field(
metadata={'help': 'The csv file to plot.'} , )
__UpperCamelCase : bool = field(
default=__A , metadata={'help': 'Whether to plot along batch size or sequence length. Defaults to sequence length.'} , )
__UpperCamelCase : bool = field(
default=__A , metadata={'help': 'Whether the csv file has time results or memory results. Defaults to memory results.'} , )
__UpperCamelCase : bool = field(
default=__A , metadata={'help': 'Disable logarithmic scale when plotting'} , )
__UpperCamelCase : bool = field(
default=__A , metadata={
'help': 'Whether the csv file has training results or inference results. Defaults to inference results.'
} , )
__UpperCamelCase : Optional[str] = field(
default=__A , metadata={'help': 'Filename under which the plot will be saved. If unused no plot is saved.'} , )
__UpperCamelCase : Optional[List[str]] = list_field(
default=__A , metadata={'help': 'List of model names that are used instead of the ones in the csv file.'} )
def __magic_name__( lowerCamelCase):
try:
int(lowerCamelCase)
return True
except ValueError:
return False
def __magic_name__( lowerCamelCase):
try:
float(lowerCamelCase)
return True
except ValueError:
return False
class a__ :
"""simple docstring"""
def __init__(self , __lowercase ):
__lowerCAmelCase = args
__lowerCAmelCase = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
__lowerCAmelCase = csv.DictReader(__lowercase )
for row in reader:
__lowerCAmelCase = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
__lowerCAmelCase = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
__lowerCAmelCase = float(row['''result'''] )
def _snake_case (self ):
__lowerCAmelCase , __lowerCAmelCase = plt.subplots()
__lowerCAmelCase = '''Time usage''' if self.args.is_time else '''Memory usage'''
__lowerCAmelCase = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__lowerCAmelCase = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
__lowerCAmelCase = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
__lowerCAmelCase = self.result_dict[model_name]['''result''']
((__lowerCAmelCase) , (__lowerCAmelCase)) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowerCAmelCase = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowerCAmelCase = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=__lowercase , )
else:
__lowerCAmelCase = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__lowerCAmelCase) , (__lowerCAmelCase)) = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
__lowerCAmelCase = np.asarray(__lowercase , __lowercase )[: len(__lowercase )]
plt.scatter(
__lowercase , __lowercase , label=F"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(__lowercase , __lowercase , '''--''' )
title_str += F""" {label_model_name} vs."""
__lowerCAmelCase = title_str[:-4]
__lowerCAmelCase = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(__lowercase )
plt.xlabel(__lowercase )
plt.ylabel(__lowercase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __magic_name__( ):
__lowerCAmelCase = HfArgumentParser(lowerCamelCase)
__lowerCAmelCase = parser.parse_args_into_dataclasses()[0]
__lowerCAmelCase = Plot(args=lowerCamelCase)
plot.plot()
if __name__ == "__main__":
main()
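# Example invocation (illustrative; the script and csv file names are placeholders):
#   python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png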
| 174 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __a ( __UpperCamelCase ):
__lowercase : Tuple = ''
__lowercase : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
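    # Read-only fsspec filesystem over a Hub repo's file listing: directory entries are
    # reconstructed lazily from each sibling file's path in `_get_dirs`.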
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
'''simple docstring'''
super().__init__(self , **lowerCAmelCase__ )
lowercase__: Tuple = repo_info
lowercase__: List[Any] = token
lowercase__: Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
if self.dir_cache is None:
lowercase__: Optional[int] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowercase__: List[str] = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(lowerCAmelCase__ ): {'name': str(lowerCAmelCase__ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "rb" , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
if not isinstance(self.repo_info , lowerCAmelCase__ ):
raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' )
lowercase__: Tuple = hf_hub_url(self.repo_info.id , lowerCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase__ , mode=lowerCAmelCase__ , headers=get_authentication_headers_for_url(lowerCAmelCase__ , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> str:
'''simple docstring'''
self._get_dirs()
lowercase__: Optional[int] = self._strip_protocol(lowerCAmelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
self._get_dirs()
lowercase__: List[str] = PurePosixPath(path.strip('/' ) )
lowercase__: Any = {}
for p, f in self.dir_cache.items():
lowercase__: str = PurePosixPath(p.strip('/' ) )
lowercase__: Optional[int] = p.parent
if root == path:
lowercase__: Optional[Any] = f
lowercase__: List[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 288 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __a ( __UpperCamelCase ):
__lowercase : Any = 'pegasus'
__lowercase : Union[str, Any] = ['past_key_values']
__lowercase : Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowerCAmelCase__=50_265 , lowerCAmelCase__=1_024 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_024 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=0 , lowerCAmelCase__=False , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: int = vocab_size
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[str] = d_model
lowercase__: Optional[Any] = encoder_ffn_dim
lowercase__: Optional[Any] = encoder_layers
lowercase__: Union[str, Any] = encoder_attention_heads
lowercase__: Optional[int] = decoder_ffn_dim
lowercase__: Tuple = decoder_layers
lowercase__: Union[str, Any] = decoder_attention_heads
lowercase__: Dict = dropout
lowercase__: List[str] = attention_dropout
lowercase__: List[str] = activation_dropout
lowercase__: Optional[int] = activation_function
lowercase__: Dict = init_std
lowercase__: Optional[Any] = encoder_layerdrop
lowercase__: List[str] = decoder_layerdrop
lowercase__: Union[str, Any] = use_cache
lowercase__: Any = encoder_layers
lowercase__: List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.d_model
| 288 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
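# Lazy import structure: the submodules listed below are only imported on first attribute
# access, via the _LazyModule assignment at the bottom of this file.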
UpperCAmelCase_ : str = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[int] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 | 0 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ = logging.getLogger()
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : List[Any] = {}
lowerCAmelCase : Dict = os.path.join(SCREAMING_SNAKE_CASE , "all_results.json" )
if os.path.exists(SCREAMING_SNAKE_CASE ):
with open(SCREAMING_SNAKE_CASE , "r" ) as f:
lowerCAmelCase : Optional[int] = json.load(SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"""can't find {path}""" )
return results
lowerCAmelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
import xla_spawn
lowerCAmelCase : List[Any] = self.get_auto_remove_tmp_dir()
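        # Patched sys.argv layout below: argv[0] is the program name, then xla_spawn's own
        # --num_cores flag, then the training script path followed by that script's arguments.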
lowerCAmelCase : List[Any] = f"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(snake_case__ , "argv" , snake_case__ ):
lowerCAmelCase : Tuple = time()
xla_spawn.main()
lowerCAmelCase : Tuple = time()
lowerCAmelCase : int = get_results(snake_case__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def lowercase__ ( self ):
"""simple docstring"""
import xla_spawn
lowerCAmelCase : Any = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
with patch.object(snake_case__ , "argv" , snake_case__ ):
xla_spawn.main()
| 133 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase__ = '''src/diffusers'''
lowerCAmelCase__ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCAmelCase__ = spec.loader.load_module()
def a__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
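    # A line still belongs to the current object if it keeps the indent, is (nearly)
    # empty, or only closes a function signature, e.g. ") -> str:".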
return line.startswith(SCREAMING_SNAKE_CASE ) or len(SCREAMING_SNAKE_CASE ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , SCREAMING_SNAKE_CASE ) is not None
def a__ ( SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Dict = object_name.split("." )
lowerCAmelCase : Optional[int] = 0
# First let's find the module where our object lives.
lowerCAmelCase : Any = parts[i]
while i < len(SCREAMING_SNAKE_CASE ) and not os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE , f"""{module}.py""" ) ):
i += 1
if i < len(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE , parts[i] )
if i >= len(SCREAMING_SNAKE_CASE ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(SCREAMING_SNAKE_CASE , f"""{module}.py""" ) , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase : List[Any] = f.readlines()
# Now let's find the class / func in the code!
lowerCAmelCase : List[str] = ""
lowerCAmelCase : int = 0
for name in parts[i + 1 :]:
while (
line_index < len(SCREAMING_SNAKE_CASE ) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowerCAmelCase : List[str] = line_index
while line_index < len(SCREAMING_SNAKE_CASE ) and _should_continue(lines[line_index] , SCREAMING_SNAKE_CASE ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCAmelCase : List[Any] = lines[start_index:line_index]
return "".join(SCREAMING_SNAKE_CASE )
lowerCAmelCase__ = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowerCAmelCase__ = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowerCAmelCase__ = re.compile(r'''<FILL\s+[^>]*>''')
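# The three patterns above match "# Copied from diffusers.<object>" headers, parse
# "<old>-><new> <option>" replacement rules, and detect <FILL ...> placeholders, respectively.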
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase : int = code.split("\n" )
lowerCAmelCase : List[str] = 0
while idx < len(SCREAMING_SNAKE_CASE ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(SCREAMING_SNAKE_CASE ):
return re.search(r"^(\s*)\S" , lines[idx] ).groups()[0]
return ""
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
lowerCAmelCase : List[Any] = len(get_indent(SCREAMING_SNAKE_CASE ) ) > 0
if has_indent:
lowerCAmelCase : Tuple = f"""class Bla:\n{code}"""
lowerCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = black.format_str(SCREAMING_SNAKE_CASE , mode=SCREAMING_SNAKE_CASE )
lowerCAmelCase , lowerCAmelCase : List[Any] = style_docstrings_in_code(SCREAMING_SNAKE_CASE )
return result[len("class Bla:\n" ) :] if has_indent else result
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int=False ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase : int = f.readlines()
lowerCAmelCase : List[str] = []
lowerCAmelCase : str = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(SCREAMING_SNAKE_CASE ):
lowerCAmelCase : List[Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[Any] = search.groups()
lowerCAmelCase : List[str] = find_code_in_diffusers(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Optional[Any] = get_indent(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = line_index + 1 if indent == theoretical_indent else line_index + 2
lowerCAmelCase : Optional[int] = theoretical_indent
lowerCAmelCase : List[str] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowerCAmelCase : str = True
while line_index < len(SCREAMING_SNAKE_CASE ) and should_continue:
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE ):
break
lowerCAmelCase : Tuple = lines[line_index]
lowerCAmelCase : str = _should_continue(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and re.search(f"""^{indent}# End copy""" , SCREAMING_SNAKE_CASE ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowerCAmelCase : Tuple = lines[start_index:line_index]
lowerCAmelCase : List[str] = "".join(SCREAMING_SNAKE_CASE )
# Remove any nested `Copied from` comments to avoid circular copies
lowerCAmelCase : List[str] = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(SCREAMING_SNAKE_CASE ) is None]
lowerCAmelCase : Union[str, Any] = "\n".join(SCREAMING_SNAKE_CASE )
# Before comparing, use the `replace_pattern` on the original code.
if len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase : str = replace_pattern.replace("with" , "" ).split("," )
lowerCAmelCase : List[str] = [_re_replace_pattern.search(SCREAMING_SNAKE_CASE ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = pattern.groups()
lowerCAmelCase : List[Any] = re.sub(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if option.strip() == "all-casing":
lowerCAmelCase : Optional[Any] = re.sub(obja.lower() , obja.lower() , SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = re.sub(obja.upper() , obja.upper() , SCREAMING_SNAKE_CASE )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowerCAmelCase : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code )
lowerCAmelCase : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
lowerCAmelCase : Tuple = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowerCAmelCase : int = start_index + 1
if overwrite and len(SCREAMING_SNAKE_CASE ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(SCREAMING_SNAKE_CASE )
return diffs
def a__ ( SCREAMING_SNAKE_CASE : bool = False ):
'''simple docstring'''
lowerCAmelCase : List[Any] = glob.glob(os.path.join(SCREAMING_SNAKE_CASE , "**/*.py" ) , recursive=SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = []
for filename in all_files:
lowerCAmelCase : List[Any] = is_copy_consistent(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(SCREAMING_SNAKE_CASE ) > 0:
lowerCAmelCase : List[Any] = "\n".join(SCREAMING_SNAKE_CASE )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 133 | 1 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowerCAmelCase__ : List[str] = 4
lowerCAmelCase__ : Optional[int] = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase__ : Optional[Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 37 |
'''simple docstring'''
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowerCAmelCase = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_lowerCAmelCase = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_lowerCAmelCase = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
return float((preds == labels).mean() )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase="binary" ):
"""simple docstring"""
lowerCAmelCase__ : Any = simple_accuracy(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = float(fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average=UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = {}
for id_pred, label in zip(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
lowerCAmelCase__ : Dict = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase__ : Optional[int] = [(pred, label)]
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = zip(*UpperCamelCase )
lowerCAmelCase__ : List[Any] = fa_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average="""macro""" )
fas.append(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase ) )
ems.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = float(sum(UpperCamelCase ) / len(UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = sum(UpperCamelCase ) / len(UpperCamelCase )
lowerCAmelCase__ : Dict = float(fa_score(y_true=UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,)
def UpperCAmelCase_ ( self ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__UpperCAmelCase ,__UpperCAmelCase )}
elif self.config_name == "cb":
return acc_and_fa(__UpperCAmelCase ,__UpperCAmelCase ,fa_avg="""macro""" )
elif self.config_name == "record":
lowerCAmelCase__ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCAmelCase__ : Union[str, Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(__UpperCAmelCase ,__UpperCAmelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__UpperCAmelCase ,__UpperCAmelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__UpperCAmelCase ,__UpperCAmelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 37 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def a__ ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__UpperCamelCase , __UpperCamelCase )
def a__ ( __UpperCamelCase ):
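    # Build a bias-free Linear layer that reuses the embedding weights
    # (weight tying for the output projection).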
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = emb.weight.shape
SCREAMING_SNAKE_CASE_ = nn.Linear(__UpperCamelCase , __UpperCamelCase , bias=__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = emb.weight.data
return lin_layer
def a__ ( __UpperCamelCase , __UpperCamelCase="facebook/mbart-large-en-ro" , __UpperCamelCase=False , __UpperCamelCase=False ):
SCREAMING_SNAKE_CASE_ = torch.load(__UpperCamelCase , map_location="cpu" )["model"]
remove_ignore_keys_(__UpperCamelCase )
SCREAMING_SNAKE_CASE_ = state_dict["encoder.embed_tokens.weight"].shape[0]
SCREAMING_SNAKE_CASE_ = MBartConfig.from_pretrained(__UpperCamelCase , vocab_size=__UpperCamelCase )
if mbart_aa and finetuned:
SCREAMING_SNAKE_CASE_ = "relu"
SCREAMING_SNAKE_CASE_ = state_dict["decoder.embed_tokens.weight"]
SCREAMING_SNAKE_CASE_ = MBartForConditionalGeneration(__UpperCamelCase )
model.model.load_state_dict(__UpperCamelCase )
if finetuned:
SCREAMING_SNAKE_CASE_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
A : Dict = parser.parse_args()
A : Optional[Any] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
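    # Example invocation (illustrative; the script name and paths are placeholders):
    #   python convert_mbart.py model.pt ./mbart-dump --hf_config facebook/mbart-large-cc25 --finetuned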
| 305 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class lowerCamelCase (SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase__ = Features({'''text''': Value('''string''' )} )
lowerCamelCase__ = Features({'''summary''': Value('''string''' )} )
lowerCamelCase__ = "text"
lowerCamelCase__ = "summary"
@property
def __A ( self : Dict ) -> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
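    # Illustrative note: the mapping above lets datasets with differently named columns be
    # remapped onto the canonical {"text", "summary"} schema of this summarization task.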
| 305 | 1 |