from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a single video, or a batch of videos into a list of videos (lists of frames)."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VideoImageProcessor(BaseImageProcessor):
    """Frame-wise video processor: resizes, center-crops, rescales and normalizes every frame.

    The class name is a reconstruction; the original identifier was obfuscated in the source.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(self, videos: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format)
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
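

def _demo_preprocess_video():
    # Hedged usage sketch: the class name above is reconstructed, and this demo simply
    # exercises the default pipeline (resize to shortest_edge=224, center-crop 224x224,
    # rescale by 1/255, ImageNet-standard normalize) on a dummy 8-frame clip.
    processor = VideoImageProcessor()
    video = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor.preprocess(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224): batch, frames, channels, H, W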
# =============================================================================
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

# MT5 reuses the T5 tokenizer, so it is simply aliased under the MT5 name.
MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
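
# What the lazy module buys you (a sketch, assuming the standard transformers `_LazyModule`):
# importing this package only loads the configuration metadata registered above; heavy
# framework submodules are imported lazily, on first attribute access.
#
#   from transformers.models import mt5
#   model_cls = mt5.MT5Model  # this attribute access triggers the real import of modeling_mt5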
# =============================================================================
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
# =============================================================================
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Polynomial learning-rate warmup that hands off to another schedule after `warmup_steps`."""

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
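

def _demo_create_optimizer():
    # Hedged usage sketch (step counts are illustrative, not from the source): a
    # 1000-step polynomial-decay schedule with 100 warmup steps and decoupled weight
    # decay on everything except LayerNorm weights and biases.
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
    )
    print(float(lr_schedule(50)))  # mid-warmup: 0.5 * init_lr = 2.5e-05
    return optimizer, lr_schedule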
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied before each parameter update."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False, weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None, name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients across mini-batches so one optimizer step can cover several batches."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
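

def _demo_gradient_accumulation(model, optimizer, loss_fn, dataset, accum_steps=4):
    # Hedged usage sketch: drive the accumulator over `accum_steps` mini-batches, then
    # apply a single optimizer update. `model`, `optimizer`, `loss_fn` and `dataset`
    # are hypothetical stand-ins supplied by the caller.
    accumulator = GradientAccumulator()
    for step, (features, labels) in enumerate(dataset):
        with tf.GradientTape() as tape:
            loss = loss_fn(labels, model(features, training=True))
        accumulator(tape.gradient(loss, model.trainable_variables))
        if (step + 1) % accum_steps == 0:
            optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
            accumulator.reset()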
# =============================================================================
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
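
# Note on the slow test above: `from_pt=True` converts the PyTorch "roberta-base"
# checkpoint to Flax parameters on the fly. A hedged stand-alone sketch:
#
#   from transformers import FlaxRobertaModel
#   model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
#   outputs = model(np.ones((1, 1), dtype="i4"))
#   print(outputs.last_hidden_state.shape)  # (1, 1, 768) for the base checkpoint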
# =============================================================================
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify a ValueError is raised when out_features names an unknown stage
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_gradient_checkpointing_backward_compatibility(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0  # NaN != NaN, so this zeroes out only the NaN entries
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
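
# Hedged usage sketch of the backbone contract verified above: the backbone returns one
# feature map per requested stage, each with the channel count advertised in
# `backbone.channels`. Shapes below assume the default 224x224 configuration.
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
#   backbone = MaskFormerSwinBackbone(config)
#   feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#   for fmap, n_channels in zip(feature_maps, backbone.channels):
#       assert fmap.shape[1] == n_channels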
# =============================================================================
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Bundles a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
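
# Hedged usage sketch with the public API (the checkpoint name is illustrative): the
# processor tokenizes the text, preprocesses the image, and merges both into one batch.
#
#   from transformers import CLIPProcessor
#   from PIL import Image
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
#   print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']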
# =============================================================================
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
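
# Hedged usage sketch: `dep_version_check` looks up the pinned range for a package in the
# `deps` table and raises if the installed version violates it; `require_version` does the
# same for an ad-hoc requirement string, with an optional install hint.
#
#   dep_version_check("tqdm")
#   require_version("numpy>=1.17", "pip install -U numpy")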
# =============================================================================
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __lowercase ( __lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_INIT_CONFIGURATION
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ElectraTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
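# Hedged usage sketch (assumes a transformers install with hub access):
#
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tok("ELECTRA trains a discriminator.")
#   # the [CLS] ... [SEP] framing comes from build_inputs_with_special_tokens above
#   assert enc["input_ids"][0] == tok.cls_token_id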
| 65 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # assumed intent: silence TensorFlow's C++ logging (the bare """3""" assignment did nothing)
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
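# Minimal sketch of the deprecation behaviour (assumes transformers installed):
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       CLIPFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)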
| 66 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
snake_case = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)["sample"]
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
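# Hedged sketch of the forward pass these tests exercise (assumes diffusers):
#
#   model = UNet2DModel(
#       sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
#       block_out_channels=(32, 64),
#       down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#       up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#   )
#   sample = torch.randn(1, 3, 32, 32)
#   out = model(sample, timestep=10).sample  # shape preserved: (1, 3, 32, 32)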
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int64, device=torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 67 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
"""simple docstring"""
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split('\n')
    adjacency_matrix = [line.split(',') for line in data]
    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])
    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
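# Illustrative check on a tiny network (not from the original file): a triangle
# with weights 1, 2 and 3 has total weight 6 and a minimum spanning tree of
# weight 3, so the reported saving is 3.
#
#   g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   mst = g.prims_algorithm()
#   assert sum(g.edges.values()) - sum(mst.edges.values()) == 3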
| 13 | 0 |
from ..utils import DummyObject, requires_backends
class _A(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
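# Behaviour sketch (assumes diffusers' DummyObject semantics): without the
# `torch` and `torchsde` backends installed, instantiating this placeholder or
# calling `from_config`/`from_pretrained` raises an ImportError naming the
# missing libraries instead of failing at import time.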
| 68 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 5_12 != 4_48:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 5_12 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 5_12):
        block = bit_string[pos : pos + 5_12]
        block_words = []
        for i in range(0, 5_12, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xef_cd_ab_89
    ca = 0x98_ba_dc_fe
    da = 0x10_32_54_76
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_32(aa, a)
        ba = sum_32(ba, b)
        ca = sum_32(ca, c)
        da = sum_32(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
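# Known-answer check (RFC 1321 test suite): the MD5 of the empty message is
# d41d8cd98f00b204e9800998ecf8427e, so:
#
#   assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"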
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
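# Behaviour note (sketch): with this `_LazyModule` layout, importing the xglm
# subpackage stays cheap; a heavy symbol such as `XGLMModel` is only resolved
# (and torch only imported) on first attribute access.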
| 69 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Dict = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}

    def __init__(
        self,
        vocab_size=5_02_77,
        context_length=10_24,
        hidden_size=40_96,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
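# Hedged usage sketch: the `attribute_map` above makes `max_position_embeddings`
# a transparent alias of `context_length`.
#
#   config = RwkvConfig(context_length=2048)
#   assert config.max_position_embeddings == 2048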
| 13 | 0 |
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 70 |
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # unreachable: any multiple of 15 already matches the branch above
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
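# Quick check: for n = 10 the multiples counted are 3, 5, 6 and 9, so
# solution(10) == 23.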
| 13 | 0 |
'''simple docstring'''
from math import factorial
def combinations(n: int, k: int) -> int:
    """simple docstring"""
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """simple docstring"""

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.0_2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 13 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    '''simple docstring'''
    inspect_dataset(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    '''simple docstring'''
    inspect_metric(path, tmp_path)
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    '''simple docstring'''
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    '''simple docstring'''
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    '''simple docstring'''
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    '''simple docstring'''
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 72 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
A__ : Tuple = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'esm'

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=10_26,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 1_2_8
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""

    num_blocks: int = 4_8
    sequence_state_dim: int = 1_0_2_4
    pairwise_state_dim: int = 1_2_8
    sequence_head_width: int = 3_2
    pairwise_head_width: int = 3_2
    position_bins: int = 3_2
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_2_8
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.')
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.')
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.')
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.')
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.')
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.')
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.')

    def to_dict(self):
        output = asdict(self)
        output['structure_module'] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""

    sequence_dim: int = 3_8_4
    pairwise_dim: int = 1_2_8
    ipa_dim: int = 1_6
    resnet_dim: int = 1_2_8
    num_heads_ipa: int = 1_2
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 1_0
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list() -> Optional[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
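# Hedged usage sketch: constructing a folding-model config fills in the nested
# defaults via the dataclasses above.
#
#   config = EsmConfig(vocab_size=33, is_folding_model=True)
#   d = config.to_dict()
#   assert d["esmfold_config"]["trunk"]["structure_module"]["sequence_dim"] == 384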
| 13 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : int = logging.get_logger(__name__)
a_ : Optional[Any] = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = '''data2vec-audio'''

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
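# Hedged usage sketch: with the default strides (5, 2, 2, 2, 2, 2, 2) the
# property above gives 5 * 2**6 = 320, i.e. one encoder frame per 320 samples.
#
#   config = Data2VecAudioConfig()
#   assert config.inputs_to_logits_ratio == 320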
| 73 |
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
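# Known values from the Project Euler 191 statement: over a four-day period
# there are 43 valid prize strings, so solution(4) == 43.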
| 13 | 0 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """simple docstring"""
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = F'''{src_lang}-{tgt_lang}'''
    content = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, '''README.md''')
    print(F'''Generating {path}''')
    with open(path, '''w''', encoding='''utf-8''') as f:
        f.write(content)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 74 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class Version:
    """simple docstring"""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Any:
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
    @property
    def tuple( self ) -> tuple:
        return self.major, self.minor, self.patch
    def _validate_operand( self , other ) -> "Version":
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(f'{other} (type {type(other )}) cannot be compared to version.' )
    def __eq__( self , other ) -> bool:
        try:
            other = self._validate_operand(other )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__( self , other ) -> bool:
        other = self._validate_operand(other )
        return self.tuple < other.tuple
    def __hash__( self ) -> int:
        return hash(_version_tuple_to_str(self.tuple ) )
    @classmethod
    def from_dict( cls , dic ) -> "Version":
        field_names = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )
    def _to_yaml_string( self ) -> str:
        return self.version_str
def _str_to_version_tuple( version_str ) -> tuple:
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
    return tuple(int(v ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def _version_tuple_to_str( version_tuple ) -> str:
    return ".".join(str(v ) for v in version_tuple )
| 13 | 0 |
'''simple docstring'''
def solution( num : int = 1_00_00_00 ) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , num ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
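As a sanity check of the memoized Collatz search above (this is the well-known Project Euler #14 result, so the assertion is safe):

```python
# The longest Collatz chain with a starting number below one million begins at 837799.
assert solution(1_000_000) == 837_799
```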
| 75 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap :
    """simple docstring"""
    def __init__( self ) -> None:
        self.node_position = []
    def get_position( self , vertex ) -> int:
        return self.node_position[vertex]
    def set_position( self , vertex , pos ) -> None:
        self.node_position[vertex] = pos
    def top_to_bottom( self , heap , start , size , positions ) -> None:
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
                temp = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , temp )
                self.top_to_bottom(heap , smallest_child , size , positions )
    def bottom_to_top( self , val , index , heap , position ) -> None:
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] , index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp , index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp , 0 )
    def heapify( self , heap , positions ) -> None:
        start = len(heap ) // 2 - 1
        for i in range(start , -1 , -1 ):
            self.top_to_bottom(heap , i , len(heap ) , positions )
    def delete_minimum( self , heap , positions ) -> int:
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap , 0 , len(heap ) , positions )
        return temp
def prisms_algorithm( adjacency_list ) -> list:
    heap = Heap()
    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv , positions )
    for _ in range(1 , len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv , positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance , heap.get_position(neighbor ) , distance_tv , positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
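A non-interactive sketch of driving `prisms_algorithm` with a hand-built graph instead of stdin (the edge list is illustrative; the expected MST follows from tracing Prim's algorithm from vertex 0):

```python
example_edges = [(0, 1, 1), (1, 2, 2), (0, 2, 3), (1, 3, 4), (2, 4, 5)]
graph = defaultdict(list)
for u, v, w in example_edges:
    graph[u].append([v, w])
    graph[v].append([u, w])
print(prisms_algorithm(graph))  # expected: [(0, 1), (1, 2), (1, 3), (2, 4)]
```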
| 13 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ):
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
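The fire CLI above maps positional and flag arguments straight onto `convert`; a direct-call equivalent (paths are illustrative):

```python
convert("pytorch_model.bin", save_path="pytorch_model_fp16.bin")
```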
| 76 |
'''simple docstring'''
def solution( n : int = 1_00 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
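Two quick checks (the first is hand-computable, the second is the published Project Euler #6 answer):

```python
assert solution(10) == 2640            # 55**2 - 385
assert solution(100) == 25164150
```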
| 13 | 0 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask( masked_input , model , tokenizer , topk=5 ) -> list:
    """simple docstring"""
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>" ) == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input , add_special_tokens=True ) ).unsqueeze(0 )  # Batch size 1
    logits = model(input_ids )[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0 )
    values , indices = prob.topk(k=topk , dim=0 )
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(indices ) )] )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" " ) ):
        predicted_token = predicted_token_bpe.replace("\u2581" , " " )
        if " {0}".format(masked_token ) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token ) , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token , predicted_token ),
                    values[index].item(),
                    predicted_token,
                ) )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("""camembert-base""")
model = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
masked_input = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
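`fill_mask` returns a list of `(filled_sentence, probability, token)` triples, so a compact way to inspect the suggestions (output values vary by model revision):

```python
for filled, prob, token in fill_mask(masked_input, model, tokenizer, topk=3):
    print(f"{prob:.3f}\t{filled}")
```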
| 77 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]:
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : List[str] = patch_size
__lowerCamelCase : Optional[int] = num_channels
__lowerCamelCase : Any = is_training
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : str = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (image_size // patch_size) ** 2
__lowerCamelCase : Optional[int] = num_patches + 1
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Optional[int] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (self.image_size, self.image_size)
__lowerCamelCase : str = (self.patch_size, self.patch_size)
__lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Tuple = self.type_sequence_label_size
__lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : int = config_and_inputs
__lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowercase_ ( self ) -> None:
__lowerCamelCase : str = FlaxViTModelTester(self )
__lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def lowercase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : List[str] = [*signature.parameters.keys()]
__lowerCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
__lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
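For context, a minimal hedged sketch of the model class this suite exercises, using a tiny config similar to the tester defaults above (config values are illustrative):

```python
import numpy as np
from transformers import ViTConfig
from transformers.models.vit.modeling_flax_vit import FlaxViTModel

config = ViTConfig(image_size=30, patch_size=2, num_channels=3, hidden_size=32,
                   num_hidden_layers=2, num_attention_heads=4, intermediate_size=37)
model = FlaxViTModel(config)
pixel_values = np.ones((1, 3, 30, 30), dtype=np.float32)
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (1, (30 // 2) ** 2 + 1, 32) == (1, 226, 32)
```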
| 13 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 78 |
'''simple docstring'''
import argparse
A__ : Optional[Any] = """docs/source/_static/js/custom.js"""
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int:
with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Dict = f.readlines()
__lowerCamelCase : Tuple = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
__lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--version""", help="""Release version.""")
    args = parser.parse_args()
update_custom_js(args.version)
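Equivalent direct call to the updater (the version string is illustrative):

```python
update_custom_js("4.30.0")
```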
| 13 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = ''
__lowerCamelCase = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , **_lowerCAmelCase , ):
super().__init__(self , **_lowerCAmelCase )
UpperCAmelCase__ : Optional[int] = repo_info
UpperCAmelCase__ : Optional[int] = token
UpperCAmelCase__ : Any = None
def __UpperCAmelCase ( self ):
if self.dir_cache is None:
UpperCAmelCase__ : Optional[int] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCAmelCase__ : Any = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(_lowerCAmelCase ): {"""name""": str(_lowerCAmelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = "rb" , **_lowerCAmelCase , ):
if not isinstance(self.repo_info , _lowerCAmelCase ):
raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}" )
UpperCAmelCase__ : Optional[int] = hf_hub_url(self.repo_info.id , _lowerCAmelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCAmelCase , mode=_lowerCAmelCase , headers=get_authentication_headers_for_url(_lowerCAmelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __UpperCAmelCase ( self , _lowerCAmelCase , **_lowerCAmelCase ):
self._get_dirs()
UpperCAmelCase__ : int = self._strip_protocol(_lowerCAmelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=False , **_lowerCAmelCase ):
self._get_dirs()
UpperCAmelCase__ : int = PurePosixPath(path.strip("""/""" ) )
UpperCAmelCase__ : Union[str, Any] = {}
for p, f in self.dir_cache.items():
UpperCAmelCase__ : List[Any] = PurePosixPath(p.strip("""/""" ) )
UpperCAmelCase__ : Any = p.parent
if root == path:
UpperCAmelCase__ : Dict = f
UpperCAmelCase__ : Optional[Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 79 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape
__lowerCamelCase : Dict = jax.image.resize(
SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int = None
lowerCamelCase : float = 0.0
lowerCamelCase : bool = None
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : Tuple = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
__lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : int = nn.Dropout(self.dropout_prob )
__lowerCamelCase : Union[str, Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__lowerCamelCase : List[Any] = None
if use_nin_shortcut:
__lowerCamelCase : Any = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple:
__lowerCamelCase : List[Any] = hidden_states
__lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 )
__lowerCamelCase : Optional[int] = hidden_states + temb
__lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ )
if self.conv_shortcut is not None:
__lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ )
return hidden_states + residual
| 13 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class __UpperCamelCase ( _lowerCAmelCase ):
def __init__( self : int , _lowerCAmelCase : str , _lowerCAmelCase : List[str]=13 , _lowerCAmelCase : List[str]=7 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Union[str, Any]=False , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=99 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Optional[int]=5 , _lowerCAmelCase : Any=4 , _lowerCAmelCase : Tuple=37 , _lowerCAmelCase : str="gelu" , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Union[str, Any]=512 , _lowerCAmelCase : Dict=16 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : int=3 , _lowerCAmelCase : Dict=4 , _lowerCAmelCase : List[Any]=None , ) -> List[Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def _a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _a ( self : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Any ) -> Dict:
"""simple docstring"""
__lowercase = DistilBertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = DistilBertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = DistilBertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DistilBertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = DistilBertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] ) -> str:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = DistilBertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
((__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase) , (__lowercase)) = config_and_inputs
__lowercase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__snake_case :Optional[Any] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__snake_case :Dict = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case :Tuple = True
__snake_case :Tuple = True
__snake_case :List[str] = True
__snake_case :Optional[int] = True
def _a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = DistilBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_lowerCAmelCase , dim=37 )
def _a ( self : Dict ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_lowerCAmelCase )
def _a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowerCAmelCase )
def _a ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowerCAmelCase )
def _a ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowerCAmelCase )
def _a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowerCAmelCase )
def _a ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowerCAmelCase )
@slow
def _a ( self : int ) -> Optional[Any]:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DistilBertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@slow
@require_torch_gpu
def _a ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=_lowerCAmelCase )
__lowercase = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
__lowercase = torch.jit.trace(
_lowerCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowerCAmelCase , os.path.join(_lowerCAmelCase , """traced_model.pt""" ) )
__lowercase = torch.jit.load(os.path.join(_lowerCAmelCase , """traced_model.pt""" ) , map_location=_lowerCAmelCase )
loaded(inputs_dict["""input_ids"""].to(_lowerCAmelCase ) , inputs_dict["""attention_mask"""].to(_lowerCAmelCase ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _a ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
__lowercase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
__lowercase = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _lowerCAmelCase )
__lowercase = torch.tensor(
[[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1e-4 ) )
| 80 |
'''simple docstring'''
from __future__ import annotations
RADIX = 10
def radix_sort( list_of_ints : list[int] ) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets : list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
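A quick check of the restored sort (input chosen to exercise multiple digit lengths):

```python
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]
```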
| 13 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_snake_case : List[str] = logging.get_logger(__name__)
def lowerCAmelCase_ ( __lowerCamelCase ):
if isinstance(__lowerCamelCase , np.ndarray ):
return list(tensor.shape )
__snake_case : Optional[Any] = tf.shape(__lowerCamelCase )
if tensor.shape == tf.TensorShape(__lowerCamelCase ):
return dynamic
__snake_case : Optional[int] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(__lowerCamelCase )]
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = None ):
return tf.nn.softmax(logits=logits + 1e-9 , axis=__lowerCamelCase , name=__lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=1e-5 , __lowerCamelCase=-1 ):
# This is a very simplified functional layernorm, designed to duplicate
# the functionality of PyTorch nn.functional.layer_norm when this is needed to port
# models in Transformers.
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(__lowerCamelCase , __lowerCamelCase ):
raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis." )
# Get mean and variance on the axis to be normalized
__snake_case , __snake_case : int = tf.nn.moments(__lowerCamelCase , axes=[axis] , keepdims=__lowerCamelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
__snake_case : Optional[int] = [1] * inputs.shape.rank
__snake_case : Union[str, Any] = shape_list(__lowerCamelCase )[axis]
__snake_case : Any = tf.reshape(__lowerCamelCase , __lowerCamelCase )
__snake_case : Any = tf.reshape(__lowerCamelCase , __lowerCamelCase )
# Compute layer normalization using the batch_normalization
# function.
__snake_case : Dict = tf.nn.batch_normalization(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , offset=__lowerCamelCase , scale=__lowerCamelCase , variance_epsilon=__lowerCamelCase , )
return outputs
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase=0 , __lowerCamelCase=-1 ):
# Replicates the behavior of torch.flatten in TF
# If end_dim or start_dim is negative, count them from the end
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
__snake_case : Any = tf.shape(__lowerCamelCase )
__snake_case : Any = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
__snake_case : Union[str, Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase_ ( __lowerCamelCase ):
if not isinstance(__lowerCamelCase , tf.Tensor ):
__snake_case : Dict = tf.convert_to_tensor(__lowerCamelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
__snake_case : List[str] = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
__snake_case : Any = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
__snake_case : Dict = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = "input_ids" ):
tf.debugging.assert_less(
__lowerCamelCase , tf.cast(__lowerCamelCase , dtype=tensor.dtype ) , message=(
F'The maximum value of {tensor_name} ({tf.math.reduce_max(__lowerCamelCase )}) must be smaller than the embedding '
F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) , )
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__snake_case : int = 6_4_5_1_2
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
__snake_case : Dict = [x for x in data if len(__lowerCamelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"The following attributes cannot be saved to HDF5 file because "
F'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
F'bytes: {bad_attributes}' )
__snake_case : Optional[int] = np.asarray(__lowerCamelCase )
__snake_case : Any = 1
__snake_case : List[Any] = np.array_split(__lowerCamelCase , __lowerCamelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
__snake_case : str = np.array_split(__lowerCamelCase , __lowerCamelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(__lowerCamelCase ):
__snake_case : Any = chunk_data
else:
__snake_case : Optional[Any] = data
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
if name in group.attrs:
__snake_case : Optional[int] = [n.decode("utf8" ) if hasattr(__lowerCamelCase , "decode" ) else n for n in group.attrs[name]]
else:
__snake_case : int = []
__snake_case : Union[str, Any] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("utf8" ) if hasattr(__lowerCamelCase , "decode" ) else n for n in group.attrs["%s%d" % (name, chunk_id)]] )
chunk_id += 1
return data
def lowerCAmelCase_ ( __lowerCamelCase ):
def _expand_single_ad_tensor(__lowerCamelCase ):
if isinstance(__lowerCamelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(__lowerCamelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , __lowerCamelCase )
| 81 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution( limit : int = 1_50_00_00 ) -> int:
    frequencies : defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
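A hand-checkable case: below a perimeter limit of 30, exactly three perimeters (12, 24 and 30) admit a single integer right triangle:

```python
assert solution(30) == 3
```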
| 13 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""",
}
class OpenLlamaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''open-llama'''
    def __init__( self , vocab_size=100000 , hidden_size=4096 , intermediate_size=11008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention" , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )
    def _rope_scaling_validation( self ) -> None:
        '''simple docstring'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 82 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A__ : str = logging.get_logger(__name__)
A__ : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
A__ : Tuple = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
A__ : str = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
A__ : Tuple = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : Dict = RoFormerTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or pre_tok_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
):
__lowerCamelCase : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('type' ) )
__lowerCamelCase : Union[str, Any] = do_lower_case
__lowerCamelCase : str = strip_accents
__lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = do_lower_case
def __getstate__( self ) -> List[str]:
__lowerCamelCase : Union[str, Any] = self.__dict__.copy()
__lowerCamelCase : Dict = BertPreTokenizer()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = d
__lowerCamelCase : List[Any] = self.__dict__['_tokenizer'].get_vocab()
__lowerCamelCase : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE_ ) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str:
__lowerCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
__lowerCamelCase : List[str] = [self.sep_token_id]
__lowerCamelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
__lowerCamelCase : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ) -> Any:
__lowerCamelCase : Tuple = BertPreTokenizer()
return super().save_pretrained(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 13 | 0 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
# TensorRT INT8 settings (env-var names reconstructed from the benchmark this mirrors; values as given)
os.environ['''ORT_TENSORRT_INT8_ENABLE'''] = '''1'''
os.environ['''ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE'''] = '''0'''
os.environ['''ORT_TENSORRT_ENGINE_CACHE_ENABLE'''] = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
outputs = {}
for iter in range(max_iters):
    outputs = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mt5"""] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 13 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit : int = 100_0000 , n_limit : int = 10 ) -> int:
    count : defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a polynomial warmup on top of a given learning rate decay schedule."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr, num_train_steps, num_warmup_steps, min_lr_ratio=0.0, adam_beta1=0.9,
                     adam_beta2=0.999, adam_epsilon=1e-8, adam_clipnorm=None, adam_global_clipnorm=None,
                     weight_decay_rate=0.0, power=1.0, include_in_weight_decay=None):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio, power=power, )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps, )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'], include_in_weight_decay=include_in_weight_decay, )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
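# Usage sketch (added example; the hyperparameter values are assumptions):
# optimizer, lr_schedule = create_optimizer(
#     init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
# )
# model.compile(optimizer=optimizer)  # `model` is a hypothetical tf.keras.Model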
class AdamWeightDecay(Adam):
    """Adam optimizer with decoupled weight decay applied to selected variables."""

    def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False,
                 weight_decay_rate=0.0, include_in_weight_decay=None, exclude_from_weight_decay=None,
                 name="AdamWeightDecay", **kwargs):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with the WarmUp custom object."""
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
                use_locking=self._use_locking, )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """Accumulates gradients across steps; call with gradients, then read `gradients`."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient), trainable=False, synchronization=tf.VariableSynchronization.ON_READ, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA, )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients) != len(self._gradients):
            raise ValueError(f'Expected {len(self._gradients)} gradients, but got {len(gradients)}')
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
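# Sketch of a gradient-accumulation loop (added example; `model`, `loss_fn`,
# `dataset` and `optimizer` are hypothetical):
# accumulator = GradientAccumulator()
# for features, labels in dataset:
#     with tf.GradientTape() as tape:
#         loss = loss_fn(labels, model(features))
#     accumulator(tape.gradient(loss, model.trainable_variables))
# optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
# accumulator.reset()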
| 13 | 0 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type='dataset'), 'r') as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info['name']
        class_names.append(info['name'])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
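# Illustrative shape of the metadata returned above (an added sketch; the ids and
# names are made up):
# {"0": "wall", "1": "sky", ..., "thing_ids": [7, 12], "class_names": ["wall", "sky", ...]}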
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400,
                 size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5],
                 image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255,
                 repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {'shortest_edge': 32, 'longest_edge': 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, 'image_mean'))
        self.assertTrue(hasattr(image_processor, 'image_std'))
        self.assertTrue(hasattr(image_processor, 'do_normalize'))
        self.assertTrue(hasattr(image_processor, 'do_resize'))
        self.assertTrue(hasattr(image_processor, 'size'))
        self.assertTrue(hasattr(image_processor, 'ignore_index'))
        self.assertTrue(hasattr(image_processor, 'class_info_file'))
        self.assertTrue(hasattr(image_processor, 'num_text'))
        self.assertTrue(hasattr(image_processor, 'repo_path'))
        self.assertTrue(hasattr(image_processor, 'metadata'))
        self.assertTrue(hasattr(image_processor, 'do_reduce_labels'))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ['semantic'], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processing_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs, ['semantic'] * len(image_inputs), annotations, return_tensors='pt', instance_id_to_semantic_id=instance_id_to_semantic_id, pad_and_return_pixel_mask=True, )
        return inputs
    def test_init_without_params(self):
        pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type)
            mask_labels = inputs['mask_labels']
            class_labels = inputs['class_labels']
            pixel_values = inputs['pixel_values']
            text_inputs = inputs['text_inputs']
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)
        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type='pil')
        common(is_instance_map=True, segmentation_type='pil')
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1
        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
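    # Illustration of the RLE convention asserted above (added comment): the list
    # reads [start_1, length_1, start_2, length_2, ...] over the row-major
    # flattened mask, so the first run of ones starts at flat position 21
    # (row 0, col 20, 1-based) and spans 45 pixels, because row 0 cols 20..49 and
    # row 1 cols 0..14 are contiguous after flattening.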
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape, (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ), )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file='ade20k_panoptic.json', num_text=self.image_processing_tester.num_text, repo_path='shi-labs/oneformer_demo', )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue('segmentation' in el)
            self.assertTrue('segments_info' in el)
            self.assertEqual(type(el['segments_info']), list)
            self.assertEqual(
                el['segmentation'].shape, (self.image_processing_tester.height, self.image_processing_tester.width))
| 85 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
                 depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
                 hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
                 hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
                 layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True,
                 type_sequence_label_size=10, encoder_stride=8,
                 out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError is raised for an unknown output feature name
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase_ ( self ) -> int:
pass
def lowercase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> Tuple:
return
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : str = [*signature.parameters.keys()]
__lowerCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase_ ( self ) -> List[Any]:
pass
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : int = outputs.hidden_states
__lowerCamelCase : Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# Swin has a different seq_length
__lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Optional[int] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Union[str, Any] = 3
__lowerCamelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCamelCase : str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Tuple = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Any = 0
return t
def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ):
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has'
f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.'
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
__lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
__lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 13 | 0 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell, avoiding cells marked 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
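# Example (an added illustration, not part of the original module): count the
# simple paths across a 3x3 grid whose centre cell is blocked; only the two
# border routes exist.
# grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
# assert depth_first_search(grid, 0, 0, set()) == 2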
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 86 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
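# Hypothetical usage: verify that the installed `tokenizers` package satisfies the pin.
# dep_version_check("tokenizers")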
| 13 | 0 |
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10,
                 hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True,
                 hidden_act="relu", num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def SCREAMING_SNAKE_CASE ( self : int) ->str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(UpperCAmelCase__)
A__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(config=UpperCAmelCase__)
for name, module in model.named_modules():
if isinstance(UpperCAmelCase__ , (nn.BatchNormad, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]:
'''simple docstring'''
def check_hidden_states_output(UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict):
A__ = model_class(UpperCAmelCase__)
model.to(UpperCAmelCase__)
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__))
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase__) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A__ = layer_type
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any) ->Any:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase__)
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple) ->int:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = RegNetModel.from_pretrained(UpperCAmelCase__)
self.assertIsNotNone(UpperCAmelCase__)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 87 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3"""  # silence TensorFlow logging if TF gets imported
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
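# (Added sketch) TensorFlow could be reported the same way as Torch above:
# try:
#     import tensorflow as tf
#     print("TF version:", tf.__version__)
# except ImportError:
#     print("TF version:", None)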
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare the elements at index1 and index2 and swap them if out of order for `direction`."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of `length` elements starting at `low`."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort `length` elements starting at `low`; direction 1 = ascending, 0 = descending."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item.strip()) for item in user_input.split(""",""")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("""\nSorted array in ascending order is: """, end="""""")
    print(*unsorted, sep=""", """)

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("""Sorted array in descending order is: """, end="""""")
    print(*unsorted, sep=""", """)
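# Note (added): bitonic sort assumes the element count is a power of two; a guard
# such as the following could be placed before sorting:
# assert len(unsorted) & (len(unsorted) - 1) == 0, "bitonic sort needs 2^k elements"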
| 88 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))
A__ : str = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, plus its line indices and the raw lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of model links supporting a given task guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally rewrite) the auto-generated model list inside a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
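# Illustrative usage (added, hypothetical file contents): a minimal sketch of what
# `_find_text_in_file` extracts from a prompt-delimited region of a Markdown file.
def _demo_find_text() -> None:
    import tempfile

    content = "intro\n<!--start-->\n\nline a\nline b\n\n<!--end-->\noutro\n"
    with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as tmp:
        tmp.write(content)
        path = tmp.name
    text, _, _, _ = _find_text_in_file(path, "<!--start-->", "<!--end-->")
    assert text == "line a\nline b\n"
    os.remove(path)


if __name__ == "__main__":
    _demo_find_text()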
| 89 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
A__ : Optional[Any] = tuple[int, int]
class Graph:
    """Weighted undirected graph over integer vertices."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree from the smallest vertex (Prim's algorithm)."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving achieved by replacing the network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
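# Illustrative usage (added): Prim's algorithm on a tiny in-memory graph, independent
# of the Project Euler input file.
if __name__ == "__main__":
    _g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 10})
    assert sum(_g.prims_algorithm().edges.values()) == 3  # picks (0, 1) and (1, 2)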
| 13 | 0 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False  # assignment target was garbled; assumed from the usual diffusers test preamble
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        second_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=second_prompt,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self) -> None:
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 90 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big-endian to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as its little-endian hex representation."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Yield each 512-bit block as a list of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT within 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-character hex MD5 digest of `message` (pure-Python reference implementation)."""
    # Convert to bit string, add padding and append message length
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
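# Illustrative check (added): the pure-Python digest should agree with the standard
# library, assuming the reconstructed function name `md5_me`.
if __name__ == "__main__":
    import hashlib

    for _msg in (b"", b"The quick brown fox jumps over the lazy dog"):
        assert md5_me(_msg) == hashlib.md5(_msg).hexdigest().encode("utf-8")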
| 13 | 0 |
"""simple docstring"""
from copy import deepcopy
class FenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and prefix-sum queries."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from `arr` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` to element `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set element `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of elements [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of elements [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        """Value of element `index` in O(log n)."""
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Largest index whose prefix sum is <= value, or -1; assumes non-negative elements."""
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
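# Illustrative usage (added): a minimal sketch of the tree's operations, assuming the
# reconstructed class name `FenwickTree`.
def _demo_fenwick_tree() -> None:
    tree = FenwickTree(arr=[1, 2, 3, 4, 5])
    assert tree.prefix(3) == 1 + 2 + 3
    tree.add(1, 10)  # element 1 becomes 12
    assert tree.query(1, 3) == 12 + 3
    assert tree.get_array() == [1, 12, 3, 4, 5]


if __name__ == "__main__":
    _demo_fenwick_tree()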
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 91 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class for RWKV models."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
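# Illustrative usage (added): configs are plain Python objects, so they can be built
# offline; the sizes below are arbitrary.
if __name__ == "__main__":
    _cfg = RwkvConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2)
    assert _cfg.attention_hidden_size == 64  # defaults to hidden_size
    assert _cfg.intermediate_size == 256  # defaults to 4 * hidden_size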
| 13 | 0 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR: output 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|    0    |    0    |   {nor_gate(0, 0)}    |")
    print(f"|    0    |    1    |   {nor_gate(0, 1)}    |")
    print(f"|    1    |    0    |   {nor_gate(1, 0)}    |")
    print(f"|    1    |    1    |   {nor_gate(1, 1)}    |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 92 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Sum the multiples of 3 or 5 below `n` (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # unreachable: a multiple of 15 is already a multiple of 3; kept from the original, it never fires
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes returning all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: prime below `ceiling` expressible as the longest sum of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
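# Illustrative check (added): small-ceiling sanity tests for the reconstructed names.
if __name__ == "__main__":
    assert prime_sieve(10) == [2, 3, 5, 7]
    assert solution(100) == 41  # 41 = 2 + 3 + 5 + 7 + 11 + 13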
| 93 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    """Graph given as an adjacency mapping, with BFS shortest paths from a source vertex."""

    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run BFS from the source vertex, filling in `self.parent`."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the BFS shortest path from the source to `target_vertex` as 'A->B->...'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 94 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models, optionally carrying an ESMFold head."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # the original compared each dim against itself (always divisible); the head widths are the intended divisors
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
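# Illustrative usage (added): building a folding-model config offline with the
# reconstructed class names; the sizes are arbitrary.
if __name__ == "__main__":
    _config = EsmConfig(vocab_size=33, is_folding_model=True, vocab_list=get_default_vocab_list())
    assert isinstance(_config.esmfold_config.trunk.structure_module, StructureModuleConfig)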
| 13 | 0 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
END_COMMON = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def rename_state_dict_key(k, patterns):
    """Apply the (tf_name, hf_name) replacement patterns to a state-dict key."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
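# Illustrative check (added): the pattern rewriter maps a raw TF variable name to its
# HF counterpart; the key below is a made-up example, not a real checkpoint variable.
if __name__ == "__main__":
    assert rename_state_dict_key("pegasus/decoder/layer_0/kernel", DECODER_PATTERNS) == "model.decoder.layers.0.weight"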
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 95 |
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days: int, absent: int, late: int) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
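# Illustrative alternative (added): the manual cache above can be replaced by
# functools.lru_cache; a minimal sketch, not part of the original solution.
if __name__ == "__main__":
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def _calc_cached(days: int, absent: int, late: int) -> int:
        if late == 3 or absent == 2:
            return 0
        if days == 0:
            return 1
        return (
            _calc_cached(days - 1, absent, late + 1)
            + _calc_cached(days - 1, absent + 1, 0)
            + _calc_cached(days - 1, absent, 0)
        )

    assert _calc_cached(30, 0, 0) == solution()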
| 13 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
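# Illustrative usage (added): the derived layer count follows from the block repeats.
if __name__ == "__main__":
    _cfg = EfficientNetConfig()
    assert _cfg.num_hidden_layers == sum([1, 2, 2, 3, 3, 4, 1]) * 4  # 64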
| 96 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version in `major.minor.patch` format (e.g. "1.0.0")."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str
return self.version_str
def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple parsed from a version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the string representation of a version tuple."""
    return ".".join(str(v) for v in version_tuple)
| 13 | 0 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
| 97 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
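
# Quick self-contained check of prisms_algorithm (hypothetical input, not part
# of the original script): a weighted triangle plus one extra vertex, with each
# edge stored in both directions exactly as the interactive block above does.
#
#     graph = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4), (2, 3, 3)]:
#         graph[u].append([v, w])
#         graph[v].append([u, w])
#     print(prisms_algorithm(graph))  # -> [(0, 1), (1, 2), (2, 3)]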
| 13 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': list of possible texts for the answer, as a list of strings\n          depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the CUAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n            Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly matches the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\n    \'aupr\': Area Under the Precision-Recall curve\n    \'prec_at_80_recall\': Precision at 80% recall\n    \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> cuad_metric = datasets.load_metric("cuad")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self, predictions, references):
        """Compute CUAD scores from predictions and references."""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 98 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
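
# Worked check: for n = 10 the sum of the squares is 385 and the square of the
# sum is 3025, so the difference is 2640.
#
#     >>> solution(10)
#     2640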
| 13 | 0 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
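
# Minimal sketch of the replacement the warning points to (the checkpoint id is
# an assumption for illustration, not part of this shim):
#
#     from diffusers import StableDiffusionImg2ImgPipeline
#
#     pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     out = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75)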
| 99 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 13 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
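
# Minimal usage sketch (assumes the transformers tools runtime can download the
# NLLB checkpoint; `src_lang`/`tgt_lang` must be keys of LANGUAGE_CODES above):
#
#     tool = TranslationTool()
#     print(tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))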
| 100 |
'''simple docstring'''
import argparse
A__ : Optional[Any] = """docs/source/_static/js/custom.js"""
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int:
with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Dict = f.readlines()
__lowerCamelCase : Tuple = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
__lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
A__ : Any = parser.parse_args()
update_custom_js(args.version)
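
# For illustration, the script assumes custom.js contains roughly the following
# (this sketch is inferred from the `startswith` checks above, not copied from
# the real file):
#
#     const stableVersion = "v4.30.0"
#
#     const versionMapping = {
#         "": "v4.30.0",
#         "v4.29.1": "v4.29.1",
#     }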
| 13 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 101 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
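
# Small smoke test of the blocks above (illustrative only; it follows the usual
# flax.linen init/apply pattern and assumes NHWC inputs, as `__call__` does):
#
#     import jax
#     import jax.numpy as jnp
#
#     block = FlaxUpsample2D(out_channels=8)
#     x = jnp.zeros((1, 16, 16, 8))
#     params = block.init(jax.random.PRNGKey(0), x)
#     y = block.apply(params, x)  # -> shape (1, 32, 32, 8)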
| 13 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
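
# With the `_LazyModule` indirection above, `TapexTokenizer` is imported only on
# first attribute access. Illustrative use (the exact package path is an
# assumption based on the relative imports in this file):
#
#     from transformers.models.deprecated.tapex import TapexTokenizer
#
#     tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")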
| 102 |
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
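
# Example (illustrative): RADIX == 10, so each pass distributes the numbers by
# one decimal digit, least-significant first.
#
#     >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#     [2, 24, 45, 66, 75, 90, 170, 802]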
| 13 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation EM score."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
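
# Hedged wiring sketch (names are illustrative; the callback assumes a
# LightningModule that exposes `hparams.output_dir`, `metrics`, and
# `metrics_save_path`, as the RAG example modules do):
#
#     trainer = pl.Trainer(
#         callbacks=[
#             Seq2SeqLoggingCallback(),
#             get_checkpoint_callback(output_dir="outputs", metric="em"),
#             get_early_stopping_callback(metric="em", patience=3),
#         ],
#     )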
| 103 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
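
# Quick sanity check: up to a perimeter limit of 12 the only integer-sided
# right triangle is (3, 4, 5), so exactly one perimeter value qualifies.
#
#     >>> solution(12)
#     1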
| 13 | 0 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
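
# Minimal usage sketch (data layout is an assumption: a CoNLL-style `train.txt`
# in `data_dir` with one "token label" pair per line and blank lines between
# sentences):
#
#     task = NER()
#     examples = task.read_examples_from_file("data_dir", Split.train)
#     print(examples[0].words, examples[0].labels)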
| 104 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
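
# Hedged usage sketch (checkpoint name taken from the pretrained map above; the
# Jieba pre-tokenizer requires the `rjieba` package to be installed):
#
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     print(tokenizer.tokenize("今天天气非常好。"))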
| 13 | 0 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
UpperCamelCase__ : Any = logging.getLogger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ):
SCREAMING_SNAKE_CASE_ : Dict = self.layer[current_layer](snake_case__ ,snake_case__ ,head_mask[current_layer] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , lowerCamelCase_ , )
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ):
super().__init__(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = BertEncoderWithPabee(snake_case__ )
self.init_weights()
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : str = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Any = 0
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = threshold
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = patience
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 0
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.inference_layers_num / self.inference_instances_num
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
F'*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='
F' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'
)
print(snake_case__ )
@add_start_docstrings_to_model_forward(snake_case__ )
def snake_case ( self ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=False ,):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.size()
elif inputs_embeds is not None:
SCREAMING_SNAKE_CASE_ : Any = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
SCREAMING_SNAKE_CASE_ : Tuple = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
SCREAMING_SNAKE_CASE_ : str = torch.ones(snake_case__ ,device=snake_case__ )
if token_type_ids is None:
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.zeros(snake_case__ ,dtype=torch.long ,device=snake_case__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
SCREAMING_SNAKE_CASE_ : torch.Tensor = self.get_extended_attention_mask(snake_case__ ,snake_case__ ,snake_case__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = encoder_hidden_states.size()
SCREAMING_SNAKE_CASE_ : Any = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.ones(snake_case__ ,device=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.invert_attention_mask(snake_case__ )
else:
SCREAMING_SNAKE_CASE_ : int = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
SCREAMING_SNAKE_CASE_ : List[str] = self.get_head_mask(snake_case__ ,self.config.num_hidden_layers )
SCREAMING_SNAKE_CASE_ : int = self.embeddings(
input_ids=snake_case__ ,position_ids=snake_case__ ,token_type_ids=snake_case__ ,inputs_embeds=snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = embedding_output
if self.training:
SCREAMING_SNAKE_CASE_ : List[str] = []
for i in range(self.config.num_hidden_layers ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.encoder.adaptive_forward(
snake_case__ ,current_layer=snake_case__ ,attention_mask=snake_case__ ,head_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.pooler(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = output_layers[i](output_dropout(snake_case__ ) )
res.append(snake_case__ )
elif self.patience == 0: # Use all layers for inference
SCREAMING_SNAKE_CASE_ : List[Any] = self.encoder(
snake_case__ ,attention_mask=snake_case__ ,head_mask=snake_case__ ,encoder_hidden_states=snake_case__ ,encoder_attention_mask=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = self.pooler(encoder_outputs[0] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [output_layers[self.config.num_hidden_layers - 1](snake_case__ )]
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : str = None
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
SCREAMING_SNAKE_CASE_ : List[Any] = self.encoder.adaptive_forward(
snake_case__ ,current_layer=snake_case__ ,attention_mask=snake_case__ ,head_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = self.pooler(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = output_layers[i](snake_case__ )
if regression:
SCREAMING_SNAKE_CASE_ : Dict = logits.detach()
if patient_result is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
else:
SCREAMING_SNAKE_CASE_ : List[Any] = logits.detach().argmax(dim=1 )
if patient_result is not None:
SCREAMING_SNAKE_CASE_ : str = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(snake_case__ ) ):
patient_counter += 1
else:
SCREAMING_SNAKE_CASE_ : Dict = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = logits
if patient_counter == self.patience:
break
SCREAMING_SNAKE_CASE_ : Any = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , lowerCamelCase_ , )
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,snake_case__ ):
super().__init__(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config.num_labels
SCREAMING_SNAKE_CASE_ : str = BertModelWithPabee(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = nn.Dropout(config.hidden_dropout_prob )
SCREAMING_SNAKE_CASE_ : Dict = nn.ModuleList(
[nn.Linear(config.hidden_size ,self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(snake_case__ )
def snake_case ( self ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,snake_case__=None ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.bert(
input_ids=snake_case__ ,attention_mask=snake_case__ ,token_type_ids=snake_case__ ,position_ids=snake_case__ ,head_mask=snake_case__ ,inputs_embeds=snake_case__ ,output_dropout=self.dropout ,output_layers=self.classifiers ,regression=self.num_labels == 1 ,)
SCREAMING_SNAKE_CASE_ : List[Any] = (logits[-1],)
if labels is not None:
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
for ix, logits_item in enumerate(snake_case__ ):
if self.num_labels == 1:
# We are doing regression
SCREAMING_SNAKE_CASE_ : Dict = MSELoss()
SCREAMING_SNAKE_CASE_ : List[Any] = loss_fct(logits_item.view(-1 ) ,labels.view(-1 ) )
else:
SCREAMING_SNAKE_CASE_ : str = CrossEntropyLoss()
SCREAMING_SNAKE_CASE_ : Dict = loss_fct(logits_item.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
if total_loss is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
SCREAMING_SNAKE_CASE_ : Optional[Any] = (total_loss / total_weights,) + outputs
return outputs
| 105 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
A__ : Union[str, Any] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
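
# A minimal sketch of the lazy-import pattern `_LazyModule` provides above:
# nothing is imported until an attribute is first accessed (PEP 562 module
# __getattr__). The helper below is illustrative, not transformers' internal API.
import importlib

def _lazy_getattr(name, import_structure, package):
    for module_name, exported_names in import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + module_name, package)
            return getattr(module, name)
    raise AttributeError(f"module {package!r} has no attribute {name!r}")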
| 13 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__snake_case :List[str] =logging.get_logger(__name__)
__snake_case :Any ={
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
__snake_case :Any =[
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def lowerCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
A = 'lm_head'
A = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
A = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
A = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
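
# A minimal sketch of the dotted-path traversal the loop above performs on an
# nn.Module tree: functools.reduce walks e.g. "encoder.layers.0.fc1" one
# attribute at a time. The helper is hypothetical, not part of the source script.
from functools import reduce

def get_by_dotted_path(root, dotted_key):
    return reduce(getattr, dotted_key.split("."), root)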
def lowerCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any ) -> str:
'''simple docstring'''
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == 'group' , )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase__ )[0].split('.' )[-2]
A = mapped_key.replace('*' , lowerCAmelCase__ )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
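
# A minimal sketch of the wildcard substitution used above: a mapped key such as
# "encoder.layers.*.attention.k_proj" has "*" replaced with the layer index
# parsed out of the fairseq tensor name (hypothetical standalone helper).
def expand_wildcard(mapped_key, fairseq_name, matched_key):
    layer_index = fairseq_name.split(matched_key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)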
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ) -> Optional[int]:
'''simple docstring'''
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : str=True ) -> str:
'''simple docstring'''
if config_path is not None:
A = UniSpeechConfig.from_pretrained(lowerCAmelCase__ )
else:
A = UniSpeechConfig()
if is_finetuned:
if dict_path:
A = Dictionary.load_from_json(lowerCAmelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A = target_dict.pad_index
A = target_dict.bos_index
A = target_dict.eos_index
A = len(target_dict.symbols )
A = os.path.join(lowerCAmelCase__ , 'vocab.json' )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase__ ) )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
A = target_dict.indices
# fairseq has the <pad> and <s> switched
A = 42
A = 43
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
A = WavaVecaPhonemeCTCTokenizer(
lowerCAmelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase__ , )
A = True if config.feat_extract_norm == 'layer' else False
A = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , )
A = WavaVecaProcessor(feature_extractor=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
A = UniSpeechForCTC(lowerCAmelCase__ )
else:
A = UniSpeechForPreTraining(lowerCAmelCase__ )
if is_finetuned:
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
hf_unispeech.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__snake_case :Any =argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__snake_case :Optional[Any] =parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 106 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCAmelCase_ (tf.keras.optimizers.schedules.LearningRateSchedule ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Any:
super().__init__()
__lowerCamelCase : Optional[Any] = initial_learning_rate
__lowerCamelCase : Optional[Any] = warmup_steps
__lowerCamelCase : Union[str, Any] = power
__lowerCamelCase : Optional[int] = decay_schedule_fn
__lowerCamelCase : Any = name
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str:
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__lowerCamelCase : str = tf.cast(SCREAMING_SNAKE_CASE_ , tf.floataa )
__lowerCamelCase : Optional[int] = tf.cast(self.warmup_steps , tf.floataa )
__lowerCamelCase : List[Any] = global_step_float / warmup_steps_float
__lowerCamelCase : Optional[Any] = self.initial_learning_rate * tf.math.pow(SCREAMING_SNAKE_CASE_ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=SCREAMING_SNAKE_CASE_ , )
def lowercase_ ( self ) -> Optional[Any]:
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
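
# Worked example of the warmup rule implemented above, assuming power = 1.0:
# with initial_learning_rate = 3e-5 and warmup_steps = 100, step 25 yields
# 3e-5 * (25 / 100) ** 1.0 = 7.5e-6; from step 100 onward the wrapped
# decay_schedule_fn is evaluated at (step - warmup_steps) instead.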
def UpperCAmelCase__ ( UpperCAmelCase_ : float , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 0.9 , UpperCAmelCase_ : float = 0.999 , UpperCAmelCase_ : float = 1e-8 , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : Optional[List[str]] = None , ) -> int:
__lowerCamelCase : int = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=UpperCAmelCase_ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase_ , )
if num_warmup_steps:
__lowerCamelCase : str = WarmUp(
initial_learning_rate=UpperCAmelCase_ , decay_schedule_fn=UpperCAmelCase_ , warmup_steps=UpperCAmelCase_ , )
if weight_decay_rate > 0.0:
__lowerCamelCase : List[Any] = AdamWeightDecay(
learning_rate=UpperCAmelCase_ , weight_decay_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=UpperCAmelCase_ , )
else:
__lowerCamelCase : Tuple = tf.keras.optimizers.Adam(
learning_rate=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , beta_a=UpperCAmelCase_ , epsilon=UpperCAmelCase_ , clipnorm=UpperCAmelCase_ , global_clipnorm=UpperCAmelCase_ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
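
# A hedged usage sketch for the factory above (named `create_optimizer` in
# upstream transformers; obfuscated to `UpperCAmelCase__` here, and the keyword
# names below are the upstream ones, not this copy's):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=1_000,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer)  # then fit() as usual in Keras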
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ = 0.0_0_1 , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = 0.9_9_9 , SCREAMING_SNAKE_CASE_ = 1E-7 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "AdamWeightDecay" , **SCREAMING_SNAKE_CASE_ , ) -> int:
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = weight_decay_rate
__lowerCamelCase : str = include_in_weight_decay
__lowerCamelCase : List[Any] = exclude_from_weight_decay
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> Dict:
__lowerCamelCase : Any = {'WarmUp': WarmUp}
return super(SCREAMING_SNAKE_CASE_ , cls ).from_config(SCREAMING_SNAKE_CASE_ , custom_objects=SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
super(SCREAMING_SNAKE_CASE_ , self )._prepare_local(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
__lowerCamelCase : Tuple = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = list(zip(*SCREAMING_SNAKE_CASE_ ) )
return super(SCREAMING_SNAKE_CASE_ , self ).apply_gradients(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , name=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCamelCase : Optional[int] = apply_state or {}
__lowerCamelCase : Dict = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCamelCase : List[Any] = self._fallback_apply_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> str:
__lowerCamelCase , __lowerCamelCase : Dict = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with tf.control_dependencies([decay] ):
return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_dense(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase : Tuple = self._get_lr(var.device , var.dtype.base_dtype , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self._decay_weights_op(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with tf.control_dependencies([decay] ):
return super(SCREAMING_SNAKE_CASE_ , self )._resource_apply_sparse(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : Any = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) is not None:
return False
return True
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self ) -> Tuple:
__lowerCamelCase : Tuple = []
__lowerCamelCase : Optional[Any] = None
@property
def lowercase_ ( self ) -> List[str]:
if self._accum_steps is None:
__lowerCamelCase : Tuple = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowercase_ ( self ) -> List[str]:
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> str:
if not self._gradients:
__lowerCamelCase : List[str] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(SCREAMING_SNAKE_CASE_ ) , trainable=SCREAMING_SNAKE_CASE_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(SCREAMING_SNAKE_CASE_ ) != len(self._gradients ):
raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(SCREAMING_SNAKE_CASE_ )}' )
for accum_gradient, gradient in zip(self._gradients , SCREAMING_SNAKE_CASE_ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(SCREAMING_SNAKE_CASE_ )
self._accum_steps.assign_add(1 )
def lowercase_ ( self ) -> int:
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(SCREAMING_SNAKE_CASE_ ) )
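
# A hedged usage sketch for the gradient accumulator above (workflow assumed
# from upstream transformers; the class and method names are obfuscated here):
#
#     accumulator = GradientAccumulator()          # the class defined above
#     for micro_batch in micro_batches:
#         with tf.GradientTape() as tape:
#             loss = compute_loss(model, micro_batch)
#         accumulator(tape.gradient(loss, model.trainable_variables))
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()    # the final method above zeroes grads and the step count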
| 13 | 0 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
_UpperCAmelCase : str = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def _SCREAMING_SNAKE_CASE ( ):
_A = Github(os.environ['GITHUB_TOKEN'] )
_A = g.get_repo('huggingface/transformers' )
_A = repo.get_issues(state='open' )
for issue in open_issues:
        _A = sorted([comment for comment in issue.get_comments()] , key=lambda __snake_case : __snake_case.created_at , reverse=True )
_A = comments[0] if len(__snake_case ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
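
# A standalone sketch of the close decision encoded above (hypothetical helper;
# the 7-day, 23-day and 30-day thresholds come directly from the script):
def should_close(days_since_update, days_since_creation, last_commenter, is_exempt):
    return (
        last_commenter == "github-actions[bot]"
        and days_since_update > 7
        and days_since_creation >= 30
        and not is_exempt
    )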
| 107 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[1, 2, 1] , SCREAMING_SNAKE_CASE_=[2, 2, 4] , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=2.0 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=["stage1", "stage2", "stage3"] , SCREAMING_SNAKE_CASE_=[1, 2, 3] , ) -> Any:
__lowerCamelCase : Optional[Any] = parent
__lowerCamelCase : int = batch_size
__lowerCamelCase : Optional[int] = image_size
__lowerCamelCase : Optional[int] = patch_size
__lowerCamelCase : Optional[Any] = num_channels
__lowerCamelCase : Dict = embed_dim
__lowerCamelCase : List[Any] = depths
__lowerCamelCase : int = num_heads
__lowerCamelCase : Optional[Any] = window_size
__lowerCamelCase : Optional[Any] = mlp_ratio
__lowerCamelCase : List[str] = qkv_bias
__lowerCamelCase : List[str] = hidden_dropout_prob
__lowerCamelCase : int = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = drop_path_rate
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : Union[str, Any] = use_absolute_embeddings
__lowerCamelCase : Any = patch_norm
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Dict = is_training
__lowerCamelCase : Optional[Any] = scope
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : List[str] = type_sequence_label_size
__lowerCamelCase : Dict = encoder_stride
__lowerCamelCase : Union[str, Any] = out_features
__lowerCamelCase : str = out_indices
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : List[str] = None
if self.use_labels:
__lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self ) -> Optional[int]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
__lowerCamelCase : Dict = MaskFormerSwinModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCamelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
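    # Worked example of the two shape formulas above with this tester's defaults
    # (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1] -> 3 stages):
    # patches = (32 // 2) ** 2 = 256; two patch merges give 256 // 4**2 = 16 tokens,
    # and the hidden size doubles per merge, so expected_dim = 16 * 2**2 = 64.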
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
__lowerCamelCase : Tuple = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : str = ['stem']
__lowerCamelCase : Optional[Any] = MaskFormerSwinBackbone(config=SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[Any] = config_and_inputs
__lowerCamelCase : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : int = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase : int = False
lowerCamelCase : int = False
lowerCamelCase : str = False
lowerCamelCase : int = False
lowerCamelCase : Union[str, Any] = False
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self )
__lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase_ ( self ) -> int:
pass
def lowercase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> Tuple:
return
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : str = [*signature.parameters.keys()]
__lowerCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase_ ( self ) -> List[Any]:
pass
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : int = outputs.hidden_states
__lowerCamelCase : Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# Swin has a different seq_length
__lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Optional[int] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Union[str, Any] = 3
__lowerCamelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCamelCase : str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Tuple = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Any = 0
return t
def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ):
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has'
f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.'
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
__lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
__lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase_ (unittest.TestCase , _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCamelCase : List[str] = MaskFormerSwinConfig
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : List[str] = MaskFormerSwinModelTester(self )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Any = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = backbone_class(SCREAMING_SNAKE_CASE_ )
backbone.to(SCREAMING_SNAKE_CASE_ )
backbone.eval()
__lowerCamelCase : int = backbone(**SCREAMING_SNAKE_CASE_ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE_ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowerCamelCase : Union[str, Any] = backbone(**SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(outputs.hidden_states )
            self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowerCamelCase : Optional[int] = backbone(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(outputs.attentions )
| 13 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def lowerCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowerCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(lowerCamelCase )
def lowerCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase = self._create_example_records()
_UpperCAmelCase = Dataset.from_list(lowerCamelCase )
self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""] )
for i, r in enumerate(lowerCamelCase ):
self.assertDictEqual(lowerCamelCase , example_records[i] )
def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self._create_example_records()
_UpperCAmelCase = Dataset.from_list(lowerCamelCase )
_UpperCAmelCase = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowerCamelCase ( self : str ) -> Any: # checks what happens with missing columns
"""simple docstring"""
_UpperCAmelCase = [{"""col_1""": 1}, {"""col_2""": """x"""}]
_UpperCAmelCase = Dataset.from_list(lowerCamelCase )
self.assertDictEqual(dset[0] , {"""col_1""": 1} )
self.assertDictEqual(dset[1] , {"""col_1""": None} ) # NB: first record is used for columns
def lowerCamelCase ( self : List[str] ) -> Optional[int]: # checks if the type can be inferred from the second record
"""simple docstring"""
_UpperCAmelCase = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
_UpperCAmelCase = Dataset.from_list(lowerCamelCase )
self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""" ) ) )
def lowerCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Dataset.from_list([] )
self.assertEqual(len(lowerCamelCase ) , 0 )
self.assertListEqual(dset.column_names , [] ) | 108 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A__ : Dict = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def UpperCAmelCase__ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None ) -> List[Any]:
require_version(deps[pkg] , UpperCAmelCase_ )
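
# A hedged usage sketch: `require_version_core` above takes a full pin string
# from the deps table, while the wrapper just defined resolves a package key
# first. The pin below is illustrative only, not the table's actual value:
#
#     require_version_core("tokenizers>=0.11.1,<0.14")   # raises if unmet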
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
class __a :
def __init__( self : Optional[int] ,lowerCamelCase : list[list[int]] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(lowerCamelCase ) != 0:
__SCREAMING_SNAKE_CASE = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCamelCase ) != cols:
raise error
for value in row:
if not isinstance(lowerCamelCase ,(int, float) ):
raise error
__SCREAMING_SNAKE_CASE = rows
else:
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return len(self.rows )
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.rows[0] )
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return (self.num_rows, self.num_columns)
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return self.order[0] == self.order[1]
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return bool(self.determinant() )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCamelCase ).determinant()
def UpperCAmelCase__ ( self : str ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
if (row + column) % 2 == 0:
return self.get_minor(lowerCamelCase ,lowerCamelCase )
return -1 * self.get_minor(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return Matrix(
[
[self.get_minor(lowerCamelCase ,lowerCamelCase ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self : List[Any] ):
'''simple docstring'''
return str(self.rows )
def __str__( self : List[str] ):
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(lowerCamelCase ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : list[int] ,lowerCamelCase : int | None = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise type_error
for value in row:
if not isinstance(lowerCamelCase ,(int, float) ):
raise type_error
if len(lowerCamelCase ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = self.rows[0:position] + [row] + self.rows[position:]
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : list[int] ,lowerCamelCase : int | None = None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise type_error
for value in column:
if not isinstance(lowerCamelCase ,(int, float) ):
raise type_error
if len(lowerCamelCase ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
__SCREAMING_SNAKE_CASE = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__SCREAMING_SNAKE_CASE = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : int ,lowerCamelCase : object ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : Any ,lowerCamelCase : object ):
'''simple docstring'''
return not self == other
def __neg__( self : Any ):
'''simple docstring'''
return self * -1
def __add__( self : List[Any] ,lowerCamelCase : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : Any ,lowerCamelCase : Matrix ):
'''simple docstring'''
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Any ,lowerCamelCase : Matrix | int | float ):
'''simple docstring'''
if isinstance(lowerCamelCase ,(int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(lowerCamelCase ,lowerCamelCase ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self : Optional[int] ,lowerCamelCase : int ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
__SCREAMING_SNAKE_CASE = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def UpperCAmelCase__ ( cls : str ,lowerCamelCase : list[int] ,lowerCamelCase : list[int] ):
'''simple docstring'''
return sum(row[i] * column[i] for i in range(len(lowerCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
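
# A short usage sketch for the Matrix class above; values are chosen so the
# determinant is 1, because scalar multiplication truncates entries to int:
#
#     m = Matrix([[2, 3], [1, 2]])
#     m.determinant()      # 2*2 - 3*1 = 1
#     m.inverse()          # equals the adjugate here since det == 1
#     m * m.inverse()      # the 2x2 identity matrix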
| 109 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A__ : List[str] = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13 | 0 |
"""simple docstring"""
def lowerCamelCase ( _snake_case ):
if not isinstance(_snake_case ,_snake_case ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
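
# Worked example for the proper-divisor sum above: for input 28 the divisors in
# range(1, 15) that divide 28 are 1, 2, 4, 7 and 14, so the function returns
# 1 + 2 + 4 + 7 + 14 = 28 (28 is a perfect number).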
| 110 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""")
def UpperCAmelCase__ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
__lowerCamelCase : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) )
A__ : str = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
snake_case_ = logging.getLogger(__name__)
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : List[str] ) -> Union[str, Any]:
# save results
if os.path.exists(UpperCAmelCase_ ):
if os.path.exists(os.path.join(UpperCAmelCase_ , '''config.json''' ) ) and os.path.isfile(
os.path.join(UpperCAmelCase_ , '''config.json''' ) ):
os.remove(os.path.join(UpperCAmelCase_ , '''config.json''' ) )
if os.path.exists(os.path.join(UpperCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(UpperCAmelCase_ , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(UpperCAmelCase_ , '''pytorch_model.bin''' ) )
else:
os.makedirs(UpperCAmelCase_ )
model.save_pretrained(UpperCAmelCase_ )
def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : List[str]=False ) -> Tuple:
__snake_case = 2
if unlogit:
__snake_case = torch.pow(UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case = p * torch.log(UpperCAmelCase_ )
__snake_case = 0
return -plogp.sum(dim=-1 )
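
# Worked example for the entropy above (natural log, unlogit=False): a uniform
# attention row p = [0.25, 0.25, 0.25, 0.25] gives -sum(p * log p) = log(4)
# ~= 1.386, the maximum for 4 tokens; a one-hot row yields 0 via the p == 0 guard.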
def lowerCamelCase__ ( snake_case_ : Optional[int] ) -> List[Any]:
logger.info('''lv, h >\t''' + '''\t'''.join(f"""{x + 1}""" for x in range(len(UpperCAmelCase_ ) ) ) )
for row in range(len(UpperCAmelCase_ ) ):
if tensor.dtype != torch.long:
logger.info(f"""layer {row + 1}:\t""" + '''\t'''.join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(f"""layer {row + 1}:\t""" + '''\t'''.join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Dict , snake_case_ : Any=True , snake_case_ : Any=True , snake_case_ : Optional[int]=None , snake_case_ : Optional[Any]=False ) -> List[Any]:
__snake_case = model.config.num_hidden_layers, model.config.num_attention_heads
__snake_case = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
__snake_case = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
if head_mask is None:
__snake_case = torch.ones(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__snake_case = None
__snake_case = 0.0
__snake_case = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
__snake_case = tuple(t.to(args.device ) for t in inputs )
(__snake_case ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__snake_case = model(UpperCAmelCase_ , labels=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__snake_case = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase_ ):
__snake_case = entropy(attn.detach() , UpperCAmelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__snake_case = 2
__snake_case = torch.pow(torch.pow(UpperCAmelCase_ , UpperCAmelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
__snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(UpperCAmelCase_ )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(UpperCAmelCase_ )
logger.info('''Head ranked by importance scores''' )
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
return attn_entropy, head_importance, total_loss
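# Added illustration (not in the original script): the head importance computed
# above is a first-order signal - the absolute gradient of the loss w.r.t. each
# head-mask entry. A toy stand-in, assuming torch is imported:
def _head_importance_toy_example():
    mask = torch.ones(2, 4, requires_grad=True)  # (n_layers, n_heads)
    loss = (mask * torch.randn(2, 4)).sum()      # stand-in for the LM loss
    loss.backward()
    return mask.grad.abs()                       # same |dL/dmask| signal as head_importance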
def mask_heads(args, model, eval_dataloader):
    """Mask heads step by step based on importance scores, as in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score, use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
# heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
# Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )
logger.info('''Final head mask''' )
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the weights) based on importance scores, as in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
__snake_case = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '''--data_dir''' , default=None , type=str , required=True , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--output_dir''' , default=None , type=str , required=True , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
    parser.add_argument(
        '''--config_name''' , default='''''' , type=str , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--tokenizer_name''' , default='''''' , type=str , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
    parser.add_argument(
        '''--cache_dir''' , default=None , type=str , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
    parser.add_argument(
        '''--data_subset''' , type=int , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
    parser.add_argument(
        '''--masking_threshold''' , default=0.9 , type=float , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=float , help='''Amount of heads to mask at each masking step.''' )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=str , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' , default=128 , type=int , help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) , )
    parser.add_argument('''--batch_size''' , default=1 , type=int , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=int , default=42 )
    parser.add_argument('''--local_rank''' , type=int , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=str , default='''''' , help='''Can be used for distant debugging.''' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
elif args.n_gpu > 1:
        model = nn.DataParallel(model)
# Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
# Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
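# Hypothetical invocation (added for illustration; the script filename and data
# path are assumptions, not taken from the source):
#   python run_prune_gpt.py --data_dir ./tokens.txt --model_name_or_path gpt2 \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9 --masking_amount 0.1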
| 592 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph:
    """Weighted undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph
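# Added worked example for the class above (edge weights are made up):
def _prims_demo() -> None:
    g = Graph({0, 1, 2, 3}, {(0, 1): 1, (1, 2): 2, (0, 2): 3, (2, 3): 1})
    mst = g.prims_algorithm()
    assert sum(mst.edges.values()) == 4  # keeps (0, 1), (1, 2) and (2, 3)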
def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p107_network.txt" ) -> int:
__lowerCamelCase : str = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) )
__lowerCamelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : dict[EdgeT, int] = {}
__lowerCamelCase : list[str]
__lowerCamelCase : int
__lowerCamelCase : int
with open(UpperCAmelCase_ ) as f:
__lowerCamelCase : Any = f.read().strip().split('\n' )
__lowerCamelCase : Any = [line.split(',' ) for line in data]
for edgea in range(1 , len(UpperCAmelCase_ ) ):
for edgea in range(UpperCAmelCase_ ):
if adjaceny_matrix[edgea][edgea] != "-":
__lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] )
__lowerCamelCase : Graph = Graph(set(range(len(UpperCAmelCase_ ) ) ) , UpperCAmelCase_ )
__lowerCamelCase : Graph = graph.prims_algorithm()
__lowerCamelCase : int = sum(graph.edges.values() )
__lowerCamelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_2,
        patch_size=2,
        num_channels=3,
        embed_dim=1_6,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=1_0,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [1_3, 1_6, 1_6, 1_6])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [1_6, 3_2, 6_4])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=3_7)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip('Swin does not use inputs_embeds' )
    def test_inputs_embeds(self):
        pass
    @unittest.skip('Swin does not support feedforward chunking' )
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
    def test_attention_outputs(self):
        pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def UpperCAmelCase__ ( self : Union[str, Any] ):
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
    def test_model_from_pretrained(self):
        pass
    @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_from_base(self):
        pass
    @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0  # NaN is the only value that is not equal to itself
            return t
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1E-5), msg=(
                            'Tuple and dict output are not equal. Difference:'
                            F''' {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:'''
                            F''' {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has'''
                            F''' `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}.'''
                        ), )
            recursive_check(tuple_output, dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {'output_hidden_states': True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
# Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
self.assertIsNotNone(outputs.attentions )
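# Added note on the `t[t != t] = 0` idiom used in set_nan_tensor_to_zero above:
# NaN is the only float that compares unequal to itself, so the boolean mask
# selects exactly the NaN entries (assumes torch is available, as in this file).
def _nan_to_zero_demo() -> None:
    t = torch.tensor([1.0, float("nan"), 2.0])
    t[t != t] = 0
    assert t.tolist() == [1.0, 0.0, 2.0]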
| 170 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    if len(string_aa) != 32:
        raise ValueError('Input must be of length 32')
    little_endian = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError('Input must be non-negative')
    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def preprocess(message: bytes) -> bytes:
    bit_string = B''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 5_12 != 4_48:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 5_12 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')
    for pos in range(0, len(bit_string), 5_12):
        block = bit_string[pos : pos + 5_12]
        block_words = []
        for i in range(0, 5_12, 32):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def not_aa(i: int) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative')
    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_aa(a: int, b: int) -> int:
return (a + b) % 2**32
def left_rotate_aa(i: int, shift: int) -> int:
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
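# Added sanity checks for the 32-bit helpers above:
def _bit_helpers_demo() -> None:
    assert left_rotate_aa(1, 31) == 2**31    # the set bit moves to the top
    assert left_rotate_aa(2**31, 1) == 1     # and wraps back around
    assert sum_aa(2**32 - 1, 1) == 0         # addition is modulo 2**32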
def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    a0 = 0x67_45_23_01
    b0 = 0xef_cd_ab_89
    c0 = 0x98_ba_dc_fe
    d0 = 0x10_32_54_76
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        a0 = sum_aa(a0, a)
        b0 = sum_aa(b0, b)
        c0 = sum_aa(c0, c)
        d0 = sum_aa(d0, d)
    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
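# Added cross-check against the standard library (assumes the implementation
# above is a faithful MD5; hashlib is only used to verify the digest):
def _md5_cross_check() -> None:
    import hashlib
    assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode("utf-8")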
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__snake_case = ["""bert-base-uncased""", """bert-base-cased"""]
__snake_case = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
    class ModelToSave(tf.keras.Model):
"""simple docstring"""
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)
        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="""tf""", padding="""longest""")
                tf_outputs = tf_tokenizer(test_inputs)
                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
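# Added usage sketch (downloads a checkpoint; mirrors what the tests above verify):
# TFBertTokenizer runs inside the TensorFlow graph, so its output can feed a model
# under tf.function and be saved together with it.
def _in_graph_tokenization_demo():
    tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
    return tf_tokenizer(tf.constant(["Hello world!"]))  # dict of integer tensors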
| 451 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Dict = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class for RWKV models."""

    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}
    def __init__(
        self,
        vocab_size=5_02_77,
        context_length=10_24,
        hidden_size=40_96,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
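# Added sketch of the fallback defaults wired up in __init__ above:
def _rwkv_config_demo() -> None:
    config = RwkvConfig(hidden_size=512, num_hidden_layers=4)
    assert config.attention_hidden_size == 512  # defaults to hidden_size
    assert config.intermediate_size == 4 * 512  # defaults to 4 * hidden_size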
| 13 | 0 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6_37_81_37.0
AXIS_B = 6_35_67_52.31_42_45
RADIUS = 6_37_81_37
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points, with latitudes
    corrected for the flattening of the WGS84 ellipsoid."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
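# Added rough check: one degree of latitude is about 110.5 km on this model, so
# a half-degree separation along a meridian should come out near 55 km.
def _haversine_demo() -> None:
    d = haversine_distance(0.0, 0.0, 0.5, 0.0)
    assert 54_000 < d < 56_000  # metres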
if __name__ == "__main__":
import doctest
doctest.testmod() | 256 |
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
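# Added equivalent closed form via inclusion-exclusion over arithmetic series;
# both should give 233168 for n = 1000:
def solution_closed_form(n: int = 10_00) -> int:
    def multiples_sum(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2
    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)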
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
A = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 77 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
"""simple docstring"""
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.0_2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config(self):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE_ , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
lowerCamelCase : List[Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : Union[str, Any] = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLMHeadTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]], dtype=tf.intaa)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 13 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
__SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models."""

    model_type = 'esm'
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=10_26 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , token_dropout=False , use_cache=True , emb_layer_norm_before=None , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output['esmfold_config'] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict(self):
        output = asdict(self )
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F" {self.sequence_state_dim} and {self.sequence_head_width}." )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
A_ = self.sequence_state_dim // self.sequence_head_width
A_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def _UpperCAmelCase ( self : Optional[int] ):
A_ = asdict(self )
A_ = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 452 |
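# A minimal usage sketch for the folding configuration above, assuming the
# enclosing class is the usual `transformers` EsmConfig. The nested dict values
# are hypothetical and only chosen to pass the TrunkConfig validation.
config = EsmConfig(
    vocab_size=33,
    is_folding_model=True,
    esmfold_config={"trunk": {"num_blocks": 4, "structure_module": {"num_blocks": 2}}},
)
assert config.esmfold_config.trunk.num_blocks == 4
serialized = config.to_dict()  # nested dataclasses are expanded to plain dicts
assert isinstance(serialized["esmfold_config"]["trunk"], dict)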
"""ESM model configuration."""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ESM model."""

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # The original checks computed `x % x`, which is always 0 and so never
        # fired; the intended check is divisibility by the head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 13 | 0 |
"""simple docstring"""
from collections.abc import Generator
from math import sin
def _lowerCamelCase ( UpperCAmelCase_ : bytes ) -> bytes:
"""simple docstring"""
if len(UpperCAmelCase_ ) != 32:
raise ValueError("Input must be of length 32" )
A__ = B''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> bytes:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
A__ = format(UpperCAmelCase_, "08x" )[-8:]
A__ = B''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def _lowerCamelCase ( UpperCAmelCase_ : bytes ) -> bytes:
"""simple docstring"""
A__ = B''
for char in message:
bit_string += format(UpperCAmelCase_, "08b" ).encode("utf-8" )
A__ = format(len(UpperCAmelCase_ ), "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(UpperCAmelCase_ ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def _lowerCamelCase ( UpperCAmelCase_ : bytes ) -> Generator[list[int], None, None]:
"""simple docstring"""
if len(UpperCAmelCase_ ) % 512 != 0:
raise ValueError("Input must have length that\'s a multiple of 512" )
for pos in range(0, len(UpperCAmelCase_ ), 512 ):
A__ = bit_string[pos : pos + 512]
A__ = []
for i in range(0, 512, 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) )
yield block_words
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
A__ = format(UpperCAmelCase_, "032b" )
A__ = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(UpperCAmelCase_, 2 )
def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : int ) -> int:
"""simple docstring"""
return (a + b) % 2**32
def _lowerCamelCase ( UpperCAmelCase_ : int, UpperCAmelCase_ : int ) -> int:
"""simple docstring"""
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def _lowerCamelCase ( UpperCAmelCase_ : bytes ) -> bytes:
"""simple docstring"""
A__ = preprocess(UpperCAmelCase_ )
A__ = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
A__ = 0X67_452_301
A__ = 0Xef_cda_b89
A__ = 0X98_bad_cfe
A__ = 0X10_325_476
A__ = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(UpperCAmelCase_ ):
A__ = aa
A__ = ba
A__ = ca
A__ = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
A__ = d ^ (b & (c ^ d))
A__ = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
A__ = c ^ (d & (b ^ c))
A__ = (5 * i + 1) % 16
elif i <= 47:
A__ = b ^ c ^ d
A__ = (3 * i + 5) % 16
else:
A__ = c ^ (b | not_aa(UpperCAmelCase_ ))
A__ = (7 * i) % 16
A__ = (f + a + added_consts[i] + block_words[g]) % 2**32
A__ = d
A__ = c
A__ = b
A__ = sum_aa(UpperCAmelCase_, left_rotate_aa(UpperCAmelCase_, shift_amounts[i] ) )
# Add hashed chunk to running total
A__ = sum_aa(UpperCAmelCase_, UpperCAmelCase_ )
A__ = sum_aa(UpperCAmelCase_, UpperCAmelCase_ )
A__ = sum_aa(UpperCAmelCase_, UpperCAmelCase_ )
A__ = sum_aa(UpperCAmelCase_, UpperCAmelCase_ )
A__ = reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ ) + reformat_hex(UpperCAmelCase_ )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 104 |
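# Sanity-check sketch for the MD5 implementation above: compare it against
# Python's built-in hashlib (assumes the cleaned-up function names used above).
import hashlib

for message in [b"", b"abc", b"The quick brown fox jumps over the lazy dog"]:
    assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")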
"""Prize strings (Project Euler problem 191): count attendance records with no
second absence in total and never three consecutive late days."""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
| 13 | 0 |
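# Brute-force cross-check sketch for the memoized recursion above: enumerate
# every attendance string over {O, L, A} for a few small day counts.
from itertools import product


def brute_force(days: int) -> int:
    count = 0
    for s in product("OLA", repeat=days):
        record = "".join(s)
        if record.count("A") < 2 and "LLL" not in record:
            count += 1
    return count


assert all(brute_force(d) == _calculate(d, absent=0, late=0) for d in range(1, 8))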
from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart,
    barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird,
    bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom,
    bridgetower, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, codegen,
    conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec,
    deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta,
    detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer,
    efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon,
    flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpt2, gpt_bigcode,
    gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer,
    groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox,
    layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, longformer,
    longt5, luke, lxmert, m2m_100, marian, markuplm, mask2former, maskformer, mbart,
    mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mluke, mobilebert,
    mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mra, mt5, musicgen, mvp,
    nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt,
    owlvit, pegasus, pegasus_x, perceiver, phobert, pix2struct, plbart, poolformer,
    prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta,
    roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d,
    speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter,
    squeezebert, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5,
    table_transformer, tapas, time_series_transformer, timesformer, timm_backbone,
    transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat, upernet, videomae, vilt,
    vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid,
    vit_mae, vit_msn, vivit, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme,
    wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta,
    xlm_roberta_xl, xlnet, xmod, yolos, yoso,
)
| 181 |
"""Version utils."""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version ``MAJOR.MINOR.PATCH``."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the version string from a (major, minor, patch) tuple."""
    return ".".join(str(v) for v in version_tuple)
| 13 | 0 |
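# Usage sketch for the Version dataclass above: string operands are coerced
# through _validate_operand, so comparisons against plain strings work.
v = Version("1.0.0")
assert v == "1.0.0"
assert v < "1.2.0"
assert Version.from_dict({"version_str": "2.0.0", "unused": 1}).tuple == (2, 0, 0)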
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 291 |
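# Shape-check sketch for the resnet block above (assumes the cleaned-up class
# names; channels-last NHWC layout as used by these Flax modules).
import jax
import jax.numpy as jnp

block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
hidden = jnp.zeros((1, 16, 16, 32))
temb = jnp.zeros((1, 128))
params = block.init(jax.random.PRNGKey(0), hidden, temb)
out = block.apply(params, hidden, temb)
assert out.shape == (1, 16, 16, 64)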
'''simple docstring'''
import sys
from collections import defaultdict
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> int:
__lowerCamelCase : Any = []
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any:
return self.node_position[vertex]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : Optional[int] = pos
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase : str = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase : Optional[Any] = 2 * start + 1
else:
__lowerCamelCase : int = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase : int = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase : str = temp, tempa
__lowerCamelCase : Dict = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ )
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase : Any = position[index]
while index != 0:
__lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase : Union[str, Any] = heap[parent]
__lowerCamelCase : Any = position[parent]
self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase : Tuple = val
__lowerCamelCase : List[str] = temp
self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
break
__lowerCamelCase : Tuple = parent
else:
__lowerCamelCase : Union[str, Any] = val
__lowerCamelCase : Tuple = temp
self.set_position(SCREAMING_SNAKE_CASE_ , 0 )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
__lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1
for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ):
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : Any = positions[0]
__lowerCamelCase : Union[str, Any] = sys.maxsize
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
return temp
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str:
__lowerCamelCase : List[Any] = Heap()
__lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ )
__lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase : Tuple = []
for vertex in range(len(UpperCAmelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCAmelCase_ )
heap.node_position.append(UpperCAmelCase_ )
__lowerCamelCase : Tuple = []
__lowerCamelCase : Dict = 1
__lowerCamelCase : str = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase : Any = 0
__lowerCamelCase : Any = distance
heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ )
for _ in range(1 , len(UpperCAmelCase_ ) ):
__lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCAmelCase_ )]
):
__lowerCamelCase : Dict = distance
heap.bottom_to_top(
UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Tuple = int(input("""Enter number of edges: """).strip())
A__ : str = defaultdict(list)
for _ in range(edges_number):
A__ : Optional[int] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 0 |
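# Example input for the Prim's routine above (a sketch; assumes the heap-based
# implementation with its obfuscated identifiers restored to working names).
# Undirected weighted graph as an adjacency list: node -> [[neighbor, weight], ...]
adjacency_list = {
    0: [[1, 1], [3, 3]],
    1: [[0, 1], [2, 6], [3, 5], [4, 1]],
    2: [[1, 6], [4, 5], [5, 2]],
    3: [[0, 3], [1, 5], [4, 1]],
    4: [[1, 1], [2, 5], [3, 1], [5, 4]],
    5: [[2, 2], [4, 4]],
}
# Expected minimum spanning tree edges (parent, child) starting from node 0:
# [(0, 1), (1, 4), (4, 3), (4, 5), (5, 2)]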
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = torch.device("""cpu""")
def SCREAMING_SNAKE_CASE( ) -> Any:
a__ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a__ : int = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Tuple:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int:
a__ : Optional[Any] = dct.pop(UpperCAmelCase_ )
a__ : List[str] = val
def SCREAMING_SNAKE_CASE( __UpperCamelCase ) -> Dict:
a__ : str = []
for k in state_dict.keys():
a__ : Optional[Any] = k
if ".pwconv" in k:
a__ : int = k_new.replace(".pwconv" , ".point_wise_conv" )
if ".dwconv" in k:
a__ : Any = k_new.replace(".dwconv" , ".depth_wise_conv" )
if ".Proj." in k:
a__ : List[Any] = k_new.replace(".Proj." , ".proj." )
if "patch_embed" in k_new:
a__ : Union[str, Any] = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
a__ : Optional[Any] = k_new.split("." )
if ls[2].isdigit():
a__ : int = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a__ : List[Any] = k_new.replace("network" , "swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
a__ : Any = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a__ : Optional[Any] = 10_00
a__ : Optional[Any] = 'huggingface/label-files'
a__ : Optional[int] = 'imagenet-1k-id2label.json'
a__ : Any = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="dataset" ) , "r" ) )
a__ : Optional[Any] = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
a__ : Any = idalabel
a__ : Dict = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a__ : Tuple = [3, 3, 6, 4]
a__ : Optional[Any] = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
a__ : Any = [3, 3, 9, 6]
a__ : Any = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
a__ : List[str] = [4, 3, 10, 5]
a__ : Union[str, Any] = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
a__ : Union[str, Any] = [4, 4, 12, 6]
a__ : Dict = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
a__ : int = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location="cpu" , check_hash=UpperCAmelCase_ )
else:
a__ : Tuple = torch.load(UpperCAmelCase_ , map_location="cpu" )
a__ : Tuple = checkpoint
a__ : Any = create_rename_keys(UpperCAmelCase_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# load HuggingFace model
a__ : List[str] = SwiftFormerForImageClassification(UpperCAmelCase_ ).eval()
hf_model.load_state_dict(UpperCAmelCase_ )
# prepare test inputs
a__ : Dict = prepare_img()
a__ : Tuple = ViTImageProcessor.from_pretrained("preprocessor_config" )
a__ : Any = processor(images=UpperCAmelCase_ , return_tensors="pt" )
# compare outputs from both models
a__ : List[Any] = get_expected_output(UpperCAmelCase_ )
a__ : Dict = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] , UpperCAmelCase_ , atol=1e-3 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
lowerCamelCase = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 191 |
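# Usage sketch for the conversion script above (assumes the obfuscated
# identifiers restored to working names; the script filename and checkpoint
# path are illustrative, not verified):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth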
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int:
__lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6
__lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
@slow
@require_torch
def lowerCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
snake_case_ = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
snake_case_ = BertTokenizer.from_pretrained("bert-base-uncased" )
snake_case_ = bertabert.config.encoder.vocab_size
snake_case_ = tokenizer.sep_token_id
snake_case_ = tokenizer.cls_token_id
snake_case_ = 1_2_8
snake_case_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
snake_case_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
snake_case_ = train_dataset.select(range(3_2 ) )
snake_case_ = val_dataset.select(range(1_6 ) )
snake_case_ = 4
def _map_to_encoder_decoder_inputs(_lowerCAmelCase : str ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case_ = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE_ , max_length=5_1_2 )
snake_case_ = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE_ , max_length=1_2_8 )
snake_case_ = inputs.input_ids
snake_case_ = inputs.attention_mask
snake_case_ = outputs.input_ids
snake_case_ = outputs.input_ids.copy()
snake_case_ = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
]
snake_case_ = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE_ ) == 5_1_2 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE_ ) == 1_2_8 for x in outputs.input_ids )
return batch
def _compute_metrics(_lowerCAmelCase : Optional[Any] ):
snake_case_ = pred.label_ids
snake_case_ = pred.predictions
# all unnecessary tokens are removed
snake_case_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] ) / len(SCREAMING_SNAKE_CASE_ )
return {"accuracy": accuracy}
# map train dataset
snake_case_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
snake_case_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE_ , per_device_train_batch_size=SCREAMING_SNAKE_CASE_ , per_device_eval_batch_size=SCREAMING_SNAKE_CASE_ , predict_with_generate=SCREAMING_SNAKE_CASE_ , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE_ , do_eval=SCREAMING_SNAKE_CASE_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case_ = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , )
# start training
trainer.train()
| 283 |
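# The key detail in the mapping function above: padding positions in the labels
# are replaced with -100 so that PyTorch's CrossEntropyLoss (ignore_index=-100)
# skips them when computing the loss. A minimal sketch:
import torch

labels = torch.tensor([5, 7, 0, 0])  # 0 is the pad token id here
labels = torch.where(labels == 0, torch.tensor(-100), labels)
assert labels.tolist() == [5, 7, -100, -100]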
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]:
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : List[str] = patch_size
__lowerCamelCase : Optional[int] = num_channels
__lowerCamelCase : Any = is_training
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : str = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (image_size // patch_size) ** 2
__lowerCamelCase : Optional[int] = num_patches + 1
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Optional[int] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (self.image_size, self.image_size)
__lowerCamelCase : str = (self.patch_size, self.patch_size)
__lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Tuple = self.type_sequence_label_size
__lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : int = config_and_inputs
__lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowercase_ ( self ) -> None:
__lowerCamelCase : str = FlaxViTModelTester(self )
__lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def lowercase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : List[str] = [*signature.parameters.keys()]
__lowerCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
__lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
| 13 | 0 |
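# The patch-count arithmetic the tester above relies on: a 30x30 image with
# 2x2 patches yields (30 // 2) ** 2 = 225 patches, plus one [CLS] token,
# giving a sequence length of 226.
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1
assert (num_patches, seq_length) == (225, 226)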
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 592 |
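# Usage sketch for the processor above (the model id is the standard public
# CLIP checkpoint; treat it as an assumption if running offline):
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `inputs` now holds input_ids / attention_mask from the tokenizer plus
# pixel_values from the image processor.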
"""Script that updates the version table in docs/source/_static/js/custom.js."""
import argparse

JS_PATH = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(JS_PATH, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_PATH, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 13 | 0 |
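# For reference, the lines in custom.js that the script above rewrites look
# roughly like this (illustrative, not copied from a specific release):
#
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "main": "main",
#       "v4.30.0": "v4.30.0",
#   }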
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase : Tuple = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
lowerCamelCase : str = {
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
lowerCamelCase : Tuple = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class snake_case__ ( _UpperCAmelCase ):
_lowerCAmelCase =VOCAB_FILES_NAMES
_lowerCAmelCase =PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase =PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase =RoFormerTokenizer
def __init__( self : Union[str, Any] , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Tuple="[UNK]" , _lowerCamelCase : Optional[int]="[SEP]" , _lowerCamelCase : List[Any]="[PAD]" , _lowerCamelCase : int="[CLS]" , _lowerCamelCase : Optional[Any]="[MASK]" , _lowerCamelCase : Tuple=True , _lowerCamelCase : Dict=None , **_lowerCamelCase : Tuple , ):
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
snake_case__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get('lowercase' , SCREAMING_SNAKE_CASE_ ) != do_lower_case
or pre_tok_state.get('strip_accents' , SCREAMING_SNAKE_CASE_ ) != strip_accents
):
snake_case__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('type' ) )
snake_case__ : Union[str, Any] = do_lower_case
snake_case__ : str = strip_accents
snake_case__ : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE_ )
snake_case__ : Tuple = do_lower_case
def __getstate__( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = self.__dict__.copy()
snake_case__ : Dict = BertPreTokenizer()
return state
def __setstate__( self : Optional[int] , _lowerCamelCase : Dict ):
snake_case__ : Optional[int] = d
snake_case__ : List[Any] = self.__dict__['_tokenizer'].get_vocab()
snake_case__ : Union[str, Any] = PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE_ ) )
def UpperCAmelCase__ ( self : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int]=None ):
snake_case__ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int = None ):
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : str , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] = None ):
snake_case__ : Optional[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase__ ( self : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Any=False , **_lowerCamelCase : Optional[Any] , ):
snake_case__ : Tuple = BertPreTokenizer()
return super().save_pretrained(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 170 |
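# Why save_pretrained above swaps in a BertPreTokenizer first: the jieba-based
# custom PreTokenizer is a Python object that the Rust tokenizer cannot
# serialize, so it is replaced before saving and re-attached when the tokenizer
# is loaded again. A sketch, assuming the class is the usual
# RoFormerTokenizerFast from transformers:
from transformers import RoFormerTokenizerFast

tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
tok.save_pretrained("./roformer_tok")  # works because of the pre-tokenizer swap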
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
| 13 | 0 |
"""Project Euler problem 8: find the thirteen adjacent digits in a 1000-digit
number that have the greatest product."""
import sys

N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str) -> int:
    """Return the product of the digits in ``s``."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Slide a 13-digit window over ``n`` and return the largest digit product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            # the incoming digit cannot shrink the window's product potential:
            # slide the window forward by one digit
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
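    # sanity check of the helper (illustrative addition, not part of the
    # original solution): 9 * 9 * 8 * 9 == 5832
    assert str_eval("9989") == 5832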
"""Least-significant-digit radix sort for non-negative integers."""
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sort ``list_of_ints`` in place, one decimal digit at a time.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
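    # ad-hoc demo in addition to the doctest above (illustrative addition, not
    # in the original file)
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))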
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    logger.info("Step 1 - Create the dataset")
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
# And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
# And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
    logger.info("Step 2 - Index the dataset")
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
# And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index

@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
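
# Hedged follow-up sketch (illustrative addition, not part of the original
# script): once saved, the passages and index can be reloaded and queried with a
# matching DPR question encoder, roughly like this:
#
#   from datasets import load_from_disk
#   from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast
#   dataset = load_from_disk(passages_path)
#   dataset.load_faiss_index("embeddings", index_path)
#   q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   question = "What does Moses' rod turn into ?"
#   q_emb = q_encoder(**q_tokenizer(question, return_tensors="pt"))[0][0].numpy()
#   scores, passages = dataset.get_nearest_examples("embeddings", q_emb, k=5)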
"""
Project Euler problem 75: count the perimeters up to the limit that can form
exactly one integer-sided right triangle.
"""
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    # Euclid's formula generates every primitive Pythagorean triple from coprime
    # m > n of opposite parity; the primitive perimeter is 2 * m * (m + n).
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
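    # sanity check (illustrative addition): the smallest achievable perimeter is
    # 12, from the unique triangle (3, 4, 5), so solution(12) == 1
    assert solution(12) == 1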
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a batch of random images as PIL images, numpy arrays or PyTorch tensors."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            # all images share the maximal resolution
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            # draw a random resolution per image
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs

@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
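
# Hedged usage sketch of the processor under test (illustrative addition; the
# checkpoint name and image path are assumptions, not taken from this file):
#
#   from PIL import Image
#   from transformers import ChineseCLIPImageProcessor
#   processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   pixel_values = processor(images=Image.open("cat.png"), return_tensors="pt").pixel_values
#   # pixel_values.shape -> (1, 3, 224, 224)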
"""Tokenization classes for RoFormer (fast version)."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}

class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer (backed by HuggingFace's *tokenizers* library).

    [`RoFormerTokenizerFast`] is almost identical to [`BertTokenizerFast`] and runs end-to-end tokenization:
    punctuation splitting and wordpiece. There are some differences between them when tokenizing Chinese.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom Jieba pre-tokenizer cannot be pickled; fall back to BERT's
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create a token-type mask from the two sequences passed for sequence-pair classification."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
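
# Hedged usage sketch (illustrative addition, mirroring the example in the
# transformers documentation for this tokenizer):
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")
#   # -> ['今', '天', '天', '气', '非常', '好', '。']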
"""Testing suite for the TensorFlow DeiT model."""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
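
# Hedged usage sketch of the lazily-exported classes (illustrative addition,
# not part of the original module):
#
#   from transformers import MT5ForConditionalGeneration, MT5Tokenizer
#   tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
#   model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")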
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : dict ) -> str:
"""simple docstring"""
A__ = BeautifulSoup(requests.get(UpperCAmelCase_, params=UpperCAmelCase_ ).content, "html.parser" )
A__ = soup.find("div", attrs={"class": "gs_ri"} )
A__ = div.find("div", attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
UpperCamelCase = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
"""Functions and classes related to optimization (weight updates)."""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }

def create_optimizer(
    init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio, power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2,
            epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon,
            clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule

class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, skipping parameters matched by `exclude_from_weight_decay`."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True

class GradientAccumulator:
    """
    Gradient accumulation utility. Gradients are accumulated by calling the accumulator with the gradients of
    each batch; `reset()` zeroes the accumulated values again.
    """

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
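
# Hedged usage sketch (illustrative addition, not part of the original module):
# create_optimizer wires the polynomial decay, the WarmUp wrapper and
# AdamWeightDecay together and returns both the optimizer and the schedule.
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=1_000,
#       weight_decay_rate=0.01,
#   )
#   model.compile(optimizer=optimizer)  # any tf.keras.Model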
"""
Project Euler problem 38: find the largest 1-9 pandigital number that can be
formed as the concatenated product of an integer with (1, 2, ..., n), n > 1.
"""
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    """Return True if ``n`` uses each of the digits 1-9 exactly once."""
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    # A 4-digit base concatenated with its double gives 4 + 5 = 9 digits:
    # base * 10**5 + 2 * base == 100002 * base.
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    # A 3-digit base concatenated with its double and triple gives 3 + 3 + 3 digits:
    # base * 10**6 + 2 * base * 10**3 + 3 * base == 1002003 * base.
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
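    # sanity check (illustrative addition): 192384576 is the concatenated
    # product of 192 and (1, 2, 3) and is 9-pandigital
    assert is_9_pandigital(192384576)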
"""Testing suite for the PyTorch MaskFormerSwin model."""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Optional[Any] = MaskFormerSwinModelTester(self )
__lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase_ ( self ) -> int:
pass
def lowercase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> Tuple:
return
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : str = [*signature.parameters.keys()]
__lowerCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase_ ( self ) -> List[Any]:
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> List[str]:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

    def test_hidden_states_output( self ) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding( self ) -> Any:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
    def test_model_from_pretrained( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_from_base( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
    def test_save_load_fast_init_to_base( self ) -> Union[str, Any]:
pass
    def test_model_outputs_equivalence( self ) -> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # replace NaN entries in place so torch.allclose can compare tensors
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (list, tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1E-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp( self ) -> Tuple:
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs( self ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
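# Running just this backbone suite (sketch; the exact test-file path is an assumption):
#   python -m pytest tests/models/maskformer/test_modeling_maskformer_swin.py -k Backbone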
| 13 | 0 |
import argparse
import copy
def generate_neighbours(path):
    """Build an adjacency map {node: [[neighbour, distance], ...]} from a whitespace-separated edge-list file."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours


def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting (and ending) at the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution, dict_of_neighbours):
    """All tours obtained by swapping two interior nodes, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution


def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Tabu search: explore swap neighbourhoods while forbidding recently used exchanges."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost


def main(args=None):
    """Run tabu search with the file, iteration count, and tabu-list size given on the command line."""
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
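# Usage sketch: the data file is a whitespace-separated edge list, one edge per line,
# e.g. "a b 20" meaning nodes a and b are 20 apart (single-character node names,
# since the start node is read with f.read(1)). Then:
#   python tabu_search.py -f tabu_data.txt -i 4 -s 3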
| 291 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
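# Hypothetical caller sketch (the hint string is illustrative): fail fast when an
# optional dependency is installed but too old.
#   from .dependency_versions_check import dep_version_check
#   dep_version_check("tokenizers", hint="try: pip install -U tokenizers")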
| 13 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor in [-1, 1]."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor in [-1, 1]; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
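# Round-trip sanity check (sketch): for an image tensor x in [0, 1],
# bits_to_decimal(decimal_to_bits(x)) recovers x up to 1/255 quantization error.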
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding

    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            bits_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(bits_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
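# Usage sketch (the checkpoint path is hypothetical; any UNet2DConditionModel trained
# on bit-encoded images works):
#   unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]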
| 191 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # assumed target of this assignment: silence TensorFlow log noise
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13 | 0 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and YOLO-format bounding boxes from the input directories."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Compose four images into one mosaic and rescale their annotations accordingly."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
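# Expected layout (per the constants above): LABEL_DIR holds YOLO-format
# "<class> <x_center> <y_center> <width> <height>" .txt files and IMG_DIR holds
# matching .jpg images; set LABEL_DIR, IMG_DIR and OUTPUT_DIR before running.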
| 283 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
__snake_case = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__snake_case = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__snake_case = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__snake_case = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
__snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__snake_case = {'unk_token': '<unk>'}
__snake_case = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(SCREAMING_SNAKE_CASE_ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(SCREAMING_SNAKE_CASE_ ) )
    def get_dpr_tokenizer(self ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer(self ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer(self ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown(self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self ):
"""simple docstring"""
__snake_case = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self ):
"""simple docstring"""
__snake_case = self.get_dummy_dataset()
__snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__snake_case = dataset
__snake_case = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def get_dummy_custom_hf_index_retriever(self , from_disk: bool ):
"""simple docstring"""
__snake_case = self.get_dummy_dataset()
__snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__snake_case = os.path.join(self.tmpdirname , '''dataset''' )
__snake_case = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__snake_case = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__snake_case = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , SCREAMING_SNAKE_CASE_ ) , )
return retriever
    def get_dummy_legacy_index_retriever(self ):
"""simple docstring"""
__snake_case = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__snake_case = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__snake_case = {sample['id']: [sample['text'], sample['title']] for sample in dataset}
pickle.dump(SCREAMING_SNAKE_CASE_ , open(SCREAMING_SNAKE_CASE_ , '''wb''' ) )
__snake_case = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__snake_case = RagRetriever(
SCREAMING_SNAKE_CASE_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
    def test_canonical_hf_index_retriever_retrieve(self ):
"""simple docstring"""
__snake_case = 1
__snake_case = self.get_dummy_canonical_hf_index_retriever()
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self ):
"""simple docstring"""
__snake_case = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__snake_case = self.get_dummy_dataset()
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve(self ):
"""simple docstring"""
__snake_case = 1
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained(self ):
"""simple docstring"""
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk(self ):
"""simple docstring"""
__snake_case = 1
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self ):
"""simple docstring"""
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self ):
"""simple docstring"""
__snake_case = 1
__snake_case = self.get_dummy_legacy_index_retriever()
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self ):
"""simple docstring"""
__snake_case = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(SCREAMING_SNAKE_CASE_ )
__snake_case = RagRetriever.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever.retrieve(SCREAMING_SNAKE_CASE_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self ):
"""simple docstring"""
import torch
__snake_case = 1
__snake_case = self.get_dummy_canonical_hf_index_retriever()
__snake_case = [[5, 7], [10, 11]]
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
__snake_case = (
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
__snake_case = retriever(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , )
__snake_case = ( # noqa: F841
out['context_input_ids'],
out['context_attention_mask'],
out['retrieved_doc_embeds'],
out['doc_ids'],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self ):
"""simple docstring"""
__snake_case = self.get_dpr_ctx_encoder_tokenizer()
__snake_case = 1
__snake_case = self.get_dummy_custom_hf_index_retriever(from_disk=SCREAMING_SNAKE_CASE_ )
retriever.set_ctx_encoder_tokenizer(SCREAMING_SNAKE_CASE_ )
__snake_case = [[5, 7], [10, 11]]
__snake_case = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__snake_case = retriever(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prefix=retriever.config.generator.prefix , n_docs=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
len(SCREAMING_SNAKE_CASE_ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , SCREAMING_SNAKE_CASE_ ) # check for doc token related keys in dictionary.
| 592 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
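# Minimal sketch: the minimum spanning tree of a weighted triangle keeps its two
# lightest edges.
#   Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3}).prims_algorithm().edges
#   -> {(0, 1): 1, (1, 2): 2}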
def solution(filename: str = "p107_network.txt") -> int:
    """Find the maximum saving from replacing the network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split('\n')

    adjaceny_matrix = [line.split(',') for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
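# p107_network.txt is a comma-separated adjacency matrix using "-" for missing
# edges, e.g. a 3-node network would look like:
#   -,16,12
#   16,-,17
#   12,17,-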
| 13 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
] )
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('logits_per_image', {0: 'batch'}),
('logits_per_text', {0: 'batch'}),
('text_embeds', {0: 'batch'}),
('image_embeds', {0: 'batch'}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
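# Composition sketch: build a full OwlViTConfig from fresh sub-configs.
#   config = OwlViTConfig.from_text_vision_configs(
#       OwlViTTextConfig().to_dict(), OwlViTVisionConfig().to_dict()
#   )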
| 170 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string from big-endian to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError('Input must be of length 32')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a 32-bit integer as eight little-endian hex characters."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    hex_rep = format(i, '08x')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8')
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b''
    for char in message:
        bit_string += format(char, '08b').encode('utf-8')
    start_len = format(len(bit_string), '064b').encode('utf-8')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-character blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512')

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError('Input must be non-negative')

    i_str = format(i, '032b')
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the bits of a 32-bit integer left by `shift` places."""
    if i < 0:
        raise ValueError('Input must be non-negative')
    if shift < 0:
        raise ValueError('Shift must be non-negative')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character MD5 digest of the message as bytes."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_45_23_01
    b0 = 0xef_cd_ab_89
    c0 = 0x98_ba_dc_fe
    d0 = 0x10_32_54_76

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
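

if __name__ == "__main__":
    # Illustrative cross-check (not part of the original file): the pure-Python
    # digest above should agree with hashlib's reference MD5 implementation.
    import hashlib

    sample = b"The quick brown fox jumps over the lazy dog"
    assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")
    print(md5_me(sample).decode("utf-8"))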
| 13 | 0 |
def one_pence() -> int:
    return 1  # one pence can always be made with a single 1p coin


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Count the ways `x` pence can be made from standard British coins."""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
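

if __name__ == "__main__":
    # Illustrative sanity checks (not part of the original solution): there are
    # 4 ways to make 5p from {1p, 2p, 5p}, and the widely published Project
    # Euler 31 answer for 200p is 73682.
    assert solution(5) == 4
    assert solution(200) == 73682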
| 451 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RWKV model."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
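
# Illustrative usage sketch (not part of the original file); `RwkvConfig` is
# exported from the top-level `transformers` package:
#
#     from transformers import RwkvConfig
#
#     config = RwkvConfig(num_hidden_layers=24)
#     config.max_position_embeddings  # 1024, aliased to `context_length` above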
| 13 | 0 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


# NOTE: the data-file fixtures passed in below (text_file, zip_csv_with_dir_path,
# zip_image_path) are assumed to be defined in the shared conftest of this test suite.
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(text_file), path_in_repo="data/text_data.txt", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_csv_with_dir_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token, path_or_fileobj=str(zip_image_path), path_in_repo="data.zip", repo_id=repo_id, repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
def solution(n: int = 1000) -> int:
    """Returns the sum of all the multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # unreachable: any multiple of 15 is caught above
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
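

if __name__ == "__main__":
    # Illustrative sanity check (not part of the original solution): below 10,
    # the multiples of 3 or 5 are 3, 5, 6 and 9, which sum to 23.
    assert solution(10) == 23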
| 13 | 0 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow (the legacy 1.x graph API:
    tf.Session / tf.placeholder).
    `vectors` should be an n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    `noofclusters` should be an integer.
    """

    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # NOTE: the original used the long-removed alias `tf.sub`; `tf.subtract`
        # is the TF 1.x name for the same op.
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
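

if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original file): cluster six
    # 2-D points into two groups. Runs only under the TensorFlow 1.x graph API
    # used above (tf.Session / tf.placeholder).
    points = array([[0.0, 0.0], [0.1, 0.2], [0.2, 0.1], [5.0, 5.0], [5.1, 4.9], [4.9, 5.2]])
    centroids, assignments = TFKMeansCluster(points, 2)
    print(centroids)
    print(assignments)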
| 77 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict


@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 13 | 0 |
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")

        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
                [63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994],
                [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "token_type_ids": [
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
            "attention_mask": [
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences)
| 452 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False

    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # (fixed: the original compared each state dim against itself instead of
        # against the corresponding head width)
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E",
        "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W",
        "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>",
    )
| 13 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 104 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation

    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """Returns the number of possible prize strings for the given number of
    days, using a recursive function with caching."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
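

if __name__ == "__main__":
    # Illustrative sanity check (not part of the original solution): the
    # Project Euler 191 statement gives exactly 43 "prize" strings over 4 days.
    assert solution(4) == 43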
| 13 | 0 |
speed_chart = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert `speed` from `unit_from` to `unit_to` via km/h, rounded to 3 places."""
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
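

if __name__ == "__main__":
    # Illustrative conversions (not part of the original module): 100 km/h is
    # about 62.137 mph, and 100 mph is about 160.934 km/h.
    assert convert_speed(100, "km/h", "mph") == 62.137
    assert convert_speed(100, "mph", "km/h") == 160.934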
| 181 |
# Lint as: python3
"""Version utils."""

import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
| 13 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filenames(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
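

def _example_usage(shared_dir):  # hypothetical helper, not part of the original tests
    # Illustrative usage sketch: FileLock as a context manager serialises
    # access to a shared resource across processes.
    with FileLock(str(shared_dir / "resource.lock")):
        pass  # critical section: only one lock holder at a time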
| 291 |
import sys
from collections import defaultdict


class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, tempa = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, tempa

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
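

def _example():  # hypothetical helper, not part of the original script
    # Illustrative usage sketch: build the adjacency list for a weighted
    # triangle directly instead of reading it from stdin. The cheapest two
    # edges, (0, 1) and (1, 2), should form the spanning tree.
    graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        graph[u].append([v, w])
        graph[v].append([u, w])
    return prisms_algorithm(graph)  # expected: [(0, 1), (1, 2)]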
| 13 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # We need to override this test because ViT's forward signature is different than text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because ViT expects pixel_values instead of input_ids
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 191 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first `n`
    natural numbers and the sum of their squares."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
| 13 | 0 |
def solution(n: int = 100) -> int:
    """Returns the difference between the square of the sum of the first `n`
    natural numbers and the sum of their squares."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
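

if __name__ == "__main__":
    # Illustrative worked example (not part of the original solution): for
    # n = 10 the sum of squares is 385, the square of the sum is 3025, and
    # their difference is 2640.
    assert solution(10) == 2640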
| 283 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # We need to override this test because ViT's forward signature differs from text models.
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    # We need to override this test because ViT expects pixel_values instead of input_ids.
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
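

# Aside: a minimal, self-contained illustration of the jax.jit pattern the
# test above exercises. Tracing and compilation happen on the first call;
# later calls reuse the compiled function. Purely illustrative code.
if is_flax_available():
    import jax.numpy as jnp

    @jax.jit
    def _scaled_sum(x):
        return (2.0 * x).sum()

    assert float(_scaled_sum(jnp.arange(4.0))) == 12.0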
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of small random PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
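

# End-to-end usage sketch of the processor under test. The checkpoint name
# comes from the tests above; the model call is an assumption and this is
# not executed as part of the suite:
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#   outputs = model(**inputs)  # with an OwlViTForObjectDetection model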
'''simple docstring'''
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Pin the stable version and append the new version to the mapping in custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
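
# For reference, the shape of the custom.js section this script edits; the
# excerpt below is hypothetical, reconstructed from the string markers above:
#
#   const stableVersion = "v4.30.0"
#   const versionMapping = {
#       "": "v4.30.0 (stable)",
#       "v4.29.1": "v4.29.1",
#   }
#
# Invocation sketch: python update_custom_js.py --version 4.31.0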
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
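
# Minimal usage sketch (names as reconstructed above): build a default
# config and inspect the four-stage encoder layout.
#
#   config = PoolFormerConfig()
#   config.depths        -> [2, 2, 6, 2]
#   config.hidden_sizes  -> [64, 128, 320, 512]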
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
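

# Minimal usage sketch for the blocks above (NHWC layout, as implied by the
# resize call): initialize a resnet block and run a dummy forward pass. The
# shapes here are our own choices, purely illustrative.
if __name__ == "__main__":
    _block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    _x = jnp.ones((1, 8, 8, 32))  # (batch, height, width, channels)
    _t = jnp.ones((1, 128))       # time embedding
    _params = _block.init(jax.random.PRNGKey(0), _x, _t)
    _y = _block.apply(_params, _x, _t)
    print(_y.shape)  # (1, 8, 8, 64)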
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
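
# What the _LazyModule indirection above buys (illustrative note): importing
# the package stays cheap, and the heavy torch-backed submodule is only
# imported when an attribute is first accessed, e.g.
#
#   from transformers.models.nezha import NezhaModel  # real import happens here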
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers in place using LSD radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
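
# Minimal usage sketch: radix sort agrees with the built-in sorted() on a
# small sample (non-negative integers only, as the bucketing above assumes).
if __name__ == "__main__":
    sample = [170, 45, 75, 90, 802, 24, 2, 66]
    assert radix_sort(list(sample)) == sorted(sample)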
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
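

# High-level usage sketch of the two-stage flow tested above (checkpoint
# names taken from the test; abbreviated and not meant to run in the suite):
#
#   prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   decoder = KandinskyV22InpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder-inpaint")
#   image_emb, negative_emb = prior("a hat").to_tuple()
#   result = decoder(image=init_image, mask_image=mask, image_embeds=image_emb,
#                    negative_image_embeds=negative_emb).images[0]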
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count perimeters up to ``limit`` that can form
    exactly one integer-sided right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
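
# Euclid's formula behind the solver: for coprime m > n > 0 of opposite
# parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean triple with
# perimeter 2m(m + n). Quick check with m=2, n=1:
if __name__ == "__main__":
    m, n = 2, 1
    assert (m * m - n * n, 2 * m * n, m * m + n * n) == (3, 4, 5)
    assert 2 * m * (m + n) == 3 + 4 + 5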
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function via its integral definition."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
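
# Sanity check: gamma(n) = (n - 1)! for positive integers, so gamma(5) is
# approximately 24.
if __name__ == "__main__":
    assert abs(gamma(5) - 24.0) < 1e-6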
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
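
# Usage sketch (checkpoint name from the maps above; requires the rjieba
# package for the custom pre-tokenizer):
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer.tokenize("今天天气非常好。")  # jieba-style word pre-tokenization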
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
def _UpperCAmelCase ( self : Dict ):
if not self.test_seqaseq:
return
A_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
A_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
A_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
A_ = tokenizer.prepare_seqaseq_batch(
src_texts=SCREAMING_SNAKE_CASE_ , tgt_texts=SCREAMING_SNAKE_CASE_ , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
A_ = tokenizer.prepare_seqaseq_batch(
SCREAMING_SNAKE_CASE_ , tgt_texts=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
A_ = tokenizer.prepare_seqaseq_batch(
src_texts=SCREAMING_SNAKE_CASE_ , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , SCREAMING_SNAKE_CASE_ )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def _UpperCAmelCase ( self : Tuple ):
pass
def _UpperCAmelCase ( self : Any ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A_ = [AddedToken("<special>" , lstrip=SCREAMING_SNAKE_CASE_ )]
A_ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
A_ = tokenizer_r.encode("Hey this is a <special> token" )
A_ = tokenizer_r.encode("<special>" , add_special_tokens=SCREAMING_SNAKE_CASE_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A_ = self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
A_ = self.tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
A_ = tokenizer_p.encode("Hey this is a <special> token" )
A_ = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def _UpperCAmelCase ( self : List[str] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_60_57 )
def _UpperCAmelCase ( self : int ):
A_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Optional[int] ):
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
# fmt: off
A_ = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
A_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
A_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Optional[Any] ):
A_ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
A_ = 10
A_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : int ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_62_03, 3] )
def _UpperCAmelCase ( self : Union[str, Any] ):
A_ = tempfile.mkdtemp()
A_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
A_ = NllbTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def _UpperCAmelCase ( self : Union[str, Any] ):
A_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
A_ = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
A_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCAmelCase ( self : Optional[Any] ):
A_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors="pt" )
A_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=10 , return_tensors="pt" )
A_ = targets['input_ids']
A_ = shift_tokens_right(
SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCAmelCase ( self : Any ):
A_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
"input_ids": [[25_60_47, 70, 73_56, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_60_57,
} , )
@require_torch
def _UpperCAmelCase ( self : Tuple ):
A_ = True
A_ = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
A_ = False
A_ = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
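
# Usage sketch for the language-code machinery exercised above (checkpoint
# name from the integration test; requires a network download):
#
#   tok = NllbTokenizer.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="ron_Latn"
#   )
#   batch = tok(src_text, text_target=tgt_text, return_tensors="pt")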
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers: alias them, falling back to the dummy
# objects when sentencepiece / tokenizers are not installed.
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
"""simple docstring"""
from math import log2


def lowest_set_bit(a: int) -> int:
    """Return the zero-based index of the lowest set bit of ``a``
    (function name reconstructed from the logic; ``a & -a`` isolates
    the lowest set bit)."""
    if not isinstance(a, int):
        raise TypeError("Input value must be a 'int' type")
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if a == 0 else int(log2(a & -a))
if __name__ == "__main__":
import doctest
doctest.testmod()
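
# Usage sketch: 12 = 0b1100, so its lowest set bit sits at index 2.
if __name__ == "__main__":
    assert lowest_set_bit(12) == 2
    assert lowest_set_bit(1) == 0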
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer( init_lr : float , num_train_steps : int , num_warmup_steps : int , min_lr_ratio : float = 0.0 , adam_beta1 : float = 0.9 , adam_beta2 : float = 0.999 , adam_epsilon : float = 1e-8 , adam_clipnorm : Optional[float] = None , adam_global_clipnorm : Optional[float] = None , weight_decay_rate : float = 0.0 , power : float = 1.0 , include_in_weight_decay : Optional[List[str]] = None , ):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=power , )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr , decay_schedule_fn=lr_schedule , warmup_steps=num_warmup_steps , )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule , weight_decay_rate=weight_decay_rate , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=include_in_weight_decay , )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule , beta_1=adam_beta1 , beta_2=adam_beta2 , epsilon=adam_epsilon , clipnorm=adam_clipnorm , global_clipnorm=adam_global_clipnorm , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class AdamWeightDecay(Adam ):
"""simple docstring"""
    def __init__( self , learning_rate = 0.001 , beta_1 = 0.9 , beta_2 = 0.999 , epsilon = 1E-7 , amsgrad = False , weight_decay_rate = 0.0 , include_in_weight_decay = None , exclude_from_weight_decay = None , name = "AdamWeightDecay" , **kwargs , ) -> int:
        super().__init__(learning_rate , beta_1 , beta_2 , epsilon , amsgrad , name , **kwargs )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config( cls , config ) -> Dict:
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay , cls ).from_config(config , custom_objects=custom_objects )
    def _prepare_local( self , var_device , var_dtype , apply_state ) -> Optional[int]:
        super(AdamWeightDecay , self )._prepare_local(var_device , var_dtype , apply_state )
        apply_state[(var_device, var_dtype)]['weight_decay_rate'] = tf.constant(
            self.weight_decay_rate , name='adam_weight_decay_rate' )
    def _decay_weights_op( self , var , learning_rate , apply_state ) -> int:
        do_decay = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
        return tf.no_op()
    def apply_gradients( self , grads_and_vars , name=None , **kwargs ) -> List[str]:
        grads , tvars = list(zip(*grads_and_vars ) )
        return super(AdamWeightDecay , self ).apply_gradients(zip(grads , tvars ) , name=name , **kwargs )
    def _get_lr( self , var_device , var_dtype , apply_state ) -> str:
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device , var_dtype )
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}
    def _resource_apply_dense( self , grad , var , apply_state=None ) -> str:
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_dense(grad , var , **kwargs )
    def _resource_apply_sparse( self , grad , var , indices , apply_state=None ) -> Union[str, Any]:
        lr_t , kwargs = self._get_lr(var.device , var.dtype.base_dtype , apply_state )
        decay = self._decay_weights_op(var , lr_t , apply_state )
        with tf.control_dependencies([decay] ):
            return super(AdamWeightDecay , self )._resource_apply_sparse(grad , var , indices , **kwargs )
    def get_config( self ) -> List[Any]:
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate} )
        return config
    def _do_use_weight_decay( self , param_name ) -> Dict:
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r , param_name ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r , param_name ) is not None:
                    return False
        return True
class GradientAccumulator:
    """simple docstring"""
    def __init__( self ) -> Tuple:
        self._gradients = []
        self._accum_steps = None
    @property
    def step( self ) -> List[str]:
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0 , dtype=tf.int64 ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()
    @property
    def gradients( self ) -> List[str]:
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
    def __call__( self , gradients ) -> str:
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient ) , trainable=False , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients ) != len(self._gradients ):
            raise ValueError(f'Expected {len(self._gradients )} gradients, but got {len(gradients )}' )
        for accum_gradient, gradient in zip(self._gradients , gradients ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient )
        self._accum_steps.assign_add(1 )
    def reset( self ) -> int:
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient ) )
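# Minimal usage sketch (added for illustration; uses only the definitions
# above): build a warmup + linear-decay schedule with decoupled weight decay,
# then inspect the learning rate at a few global steps.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01
    )
    for step in (0, 50, 100, 500, 1000):
        print(step, float(lr_schedule(step)))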
| 13 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        qformer_tokenizer = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert')
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def get_qformer_tokenizer( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key])
    def test_processor( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
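# Illustrative usage sketch (added; mirrors the components built in setUp above
# and assumes the tiny Hub checkpoints are reachable over the network):
if __name__ == "__main__":
    _processor = InstructBlipProcessor(
        BlipImageProcessor(),
        GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model"),
        BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert"),
    )
    _image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    _out = _processor(images=_image, text="lower newer", return_tensors="np")
    print(sorted(_out.keys()))  # input_ids, attention_mask, qformer_*, pixel_values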
| 181 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.0_2 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2", "stage3"] , out_indices=[1, 2, 3] , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Optional[int]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model( self , config , pixel_values , labels ) -> int:
        model = MaskFormerSwinModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ) -> Optional[Any]:
        model = MaskFormerSwinBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(ValueError ):
            config.out_features = ['stem']
            model = MaskFormerSwinBackbone(config=config )
    def prepare_config_and_inputs_for_common( self ) -> Union[str, Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> Tuple:
        self.model_tester = MaskFormerSwinModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaskFormerSwinConfig , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase_ ( self ) -> int:
pass
    def test_config( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> Tuple:
        return
    def test_model( self ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase_ ( self ) -> Dict:
pass
    def test_model_common_attributes( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase_ ( self ) -> List[Any]:
pass
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> List[str]:
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output( self ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def test_hidden_states_output_with_padding( self ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
    def test_model_outputs_equivalence( self ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t ):
            t[t != t] = 0
            return t
        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
            def recursive_check(tuple_object , dict_object ):
                if isinstance(tuple_object , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif isinstance(tuple_object , Dict ):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values() , dict_object.values() ):
                        recursive_check(tuple_iterable_value , dict_iterable_value )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                            'Tuple and dict output are not equal. Difference:'
                            f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
                            f' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has'
                            f' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}.'
                        ) , )
            recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest (unittest.TestCase , BackboneTesterMixin ):
"""simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp( self ) -> Tuple:
        self.model_tester = MaskFormerSwinModelTester(self )
    def test_backbone_outputs( self ) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , _ , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
| 13 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision : int ) -> str:
    """simple docstring"""
    if not isinstance(precision , int ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(f"The first {n} digits of pi is: {pi(n)}")
| 291 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
A__ : Dict = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check( pkg : str , hint : str = None ) -> None:
    require_version(deps[pkg] , hint )
| 13 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'sew'
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect."
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
@property
    def inputs_to_logits_ratio( self ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
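# Illustrative usage (added): instantiate the default config and inspect the
# derived attributes; the logits ratio is the product of the conv strides.
if __name__ == "__main__":
    _config = SEWConfig()
    print(_config.num_feat_extract_layers)  # 13 convolutional layers
    print(_config.inputs_to_logits_ratio)   # 5 * 2**6 = 320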
| 191 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A__ : List[str] = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Dict = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'time_series_transformer'
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
    def __init__( self , prediction_length = None , context_length = None , distribution_output = "student_t" , loss = "nll" , input_size = 1 , lags_sequence = [1, 2, 3, 4, 5, 6, 7] , scaling = "mean" , num_dynamic_real_features = 0 , num_static_categorical_features = 0 , num_static_real_features = 0 , num_time_features = 0 , cardinality = None , embedding_dimension = None , encoder_ffn_dim = 3_2 , decoder_ffn_dim = 3_2 , encoder_layers = 2 , decoder_layers = 2 , encoder_attention_heads = 2 , decoder_attention_heads = 2 , is_encoder_decoder = True , activation_function = "gelu" , d_model = 6_4 , dropout = 0.1 , encoder_layerdrop = 0.1 , decoder_layerdrop = 0.1 , attention_dropout = 0.1 , activation_dropout = 0.1 , num_parallel_samples = 1_0_0 , init_std = 0.02 , use_cache=True , **kwargs , ) -> Any:
        """simple docstring"""
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
    def _number_of_features( self ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
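# Illustrative usage (added): `context_length` falls back to the prediction
# length, and `feature_size` is derived from the input size and lag features.
if __name__ == "__main__":
    _config = TimeSeriesTransformerConfig(prediction_length=24)
    print(_config.context_length)  # 24
    print(_config.feature_size)    # input_size * len(lags_sequence) + _number_of_features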
| 283 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""")
def UpperCAmelCase__ ( UpperCAmelCase_ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
__lowerCamelCase : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(UpperCAmelCase_ ).content ).xpath(UpperCAmelCase_ ) )
A__ : str = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit : int = 100_0000 , n_limit : int = 10 ) -> int:
    count = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(F'{solution() = }')
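    # Added smaller illustration: with at most 100 tiles the search space is
    # tiny, so this variant runs instantly.
    print(F'{solution(100) = }')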
| 592 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
A__ : Optional[Any] = tuple[int, int]
class Graph:
    """simple docstring"""
    def __init__( self , vertices , edges ) -> None:
        self.vertices : set[int] = vertices
        self.edges : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge , weight ) -> None:
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight
    def prims_algorithm( self ) -> Graph:
        subgraph : Graph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
return subgraph
def UpperCAmelCase__ ( UpperCAmelCase_ : str = "p107_network.txt" ) -> int:
__lowerCamelCase : str = os.path.abspath(os.path.dirname(UpperCAmelCase_ ) )
__lowerCamelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : dict[EdgeT, int] = {}
__lowerCamelCase : list[str]
__lowerCamelCase : int
__lowerCamelCase : int
with open(UpperCAmelCase_ ) as f:
__lowerCamelCase : Any = f.read().strip().split('\n' )
__lowerCamelCase : Any = [line.split(',' ) for line in data]
for edgea in range(1 , len(UpperCAmelCase_ ) ):
for edgea in range(UpperCAmelCase_ ):
if adjaceny_matrix[edgea][edgea] != "-":
__lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] )
__lowerCamelCase : Graph = Graph(set(range(len(UpperCAmelCase_ ) ) ) , UpperCAmelCase_ )
__lowerCamelCase : Graph = graph.prims_algorithm()
__lowerCamelCase : int = sum(graph.edges.values() )
__lowerCamelCase : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
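    # Tiny added demo: the MST of a weighted triangle keeps the two cheapest
    # edges, so the saving equals the heaviest edge weight.
    _demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
    assert sum(_demo.edges.values()) - sum(_demo.prims_algorithm().edges.values()) == 3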
| 13 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig ( PretrainedConfig ):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__( self , vocab_size=5_0_2_5_7 , max_position_embeddings=2_0_4_8 , hidden_size=2_0_4_8 , num_layers=2_4 , attention_types=[[["global", "local"], 1_2]] , num_heads=1_6 , intermediate_size=None , window_size=2_5_6 , activation_function="gelu_new" , resid_dropout=0.0 , embed_dropout=0.0 , attention_dropout=0.0 , classifier_dropout=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types )
        if len(self.attention_layers ) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.' )
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    @staticmethod
    def expand_attention_types_params( attention_types ):
        attentions = []
        for item in attention_types:
            for _ in range(item[1] ):
                attentions.extend(item[0] )
        return attentions
def custom_unfold( input , dimension , size , step ):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape )
    sizedim = shape[dimension]
    low_indices = torch.arange(0 , sizedim , step )
    min_length = torch.div(sizedim - size , step , rounding_mode='floor' ) + 1
    indices = torch.arange(size ) + low_indices[:min_length][:, None]
    s = [slice(None )] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0 , rank + 1 ) )
    perm.append(perm.pop(dimension + 1 ) )
    return sliced.permute(perm )
def custom_get_block_length_and_num_blocks( seq_length , window_size ):
    import torch
    candidates = torch.arange(1 , window_size )
    remainders = torch.remainder(seq_length , candidates )
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors )
    return largest_divisor, torch.div(seq_length , largest_divisor , rounding_mode='floor' )
class GPTNeoOnnxConfig ( OnnxConfigWithPast ):
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_attention_heads( self ):
        return self._config.num_heads
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 13
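# Added quick check: `custom_unfold` is intended to reproduce
# `torch.Tensor.unfold` using only ONNX-exportable ops.
if is_torch_available():
    import torch
    _x = torch.arange(10).reshape(1, 10)
    assert torch.equal(custom_unfold(_x, 1, 4, 2), _x.unfold(1, 4, 2))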
| 170 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian( string_aa : bytes ) -> bytes:
    if len(string_aa ) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex( i : int ) -> bytes:
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex
def preprocess( message : bytes ) -> bytes:
    bit_string = b''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
return bit_string
def get_block_words( bit_string : bytes ) -> Generator[list[int], None, None]:
    if len(bit_string ) % 512 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_aa( i : int ) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_aa( a : int , b : int ) -> int:
    return (a + b) % 2**32
def left_rotate_aa( i : int , shift : int ) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me( message : bytes ) -> bytes:
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xef_cd_ab_89
    ca = 0x98_ba_dc_fe
    da = 0x10_32_54_76
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_aa(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_aa(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
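
    # Minimal sanity check (an addition, not from the source): the empty-message
    # digest must match the standard MD5 test vector.
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"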
| 13 | 0 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image classification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
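

if __name__ == "__main__":
    # Usage sketch (an illustration added here, not part of the original module): an
    # auto class inspects the checkpoint's config and instantiates the matching Flax
    # model from the mappings above; "bert-base-cased" is just an example checkpoint.
    model = FlaxAutoModel.from_pretrained("bert-base-cased")
    print(type(model).__name__)  # resolves to FlaxBertModel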
| 451 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RWKV model."""

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
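

if __name__ == "__main__":
    # Illustrative check (added here, not part of the original file): the derived
    # sizes fall back to hidden_size-based defaults when left as None.
    config = RwkvConfig(hidden_size=512, num_hidden_layers=4)
    assert config.attention_hidden_size == 512
    assert config.intermediate_size == 4 * 512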
| 13 | 0 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] ) | 256 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n`."""
    a = 3
    result = 0
    while a < n:
        # Multiples of 15 already satisfy the 3-or-5 test, so no separate branch is
        # needed; the original `elif a % 15 == 0` arm was unreachable dead code.
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f'''{solution() = }''')
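    # Quick check (illustrative, added here): the multiples of 3 or 5 below 10
    # are 3, 5, 6 and 9, which sum to 23.
    assert solution(10) == 23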
| 13 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance between two vectors, computed with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance between two vectors, computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
        """Time both implementations on a small example."""
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_0000 , globals=globals() , ) )
benchmark()
| 77 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 13 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
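

if __name__ == "__main__":
    # Usage sketch (illustrative, added here; fetching the checkpoint needs network
    # access): XLNet places <sep>/<cls> at the END of the sequence, unlike BERT.
    tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    ids = tok.build_inputs_with_special_tokens([10, 11], [20, 21])
    assert ids[-1] == tok.cls_token_id
    assert ids.count(tok.sep_token_id) == 2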
| 452 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    """Configuration class to store the configuration of an ESM model."""

    model_type = "esm"

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        # Note: the original checked each dim against itself (always true); the intended
        # check is divisibility by the corresponding head width.
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
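

if __name__ == "__main__":
    # Illustrative check (added here, not part of the original file): a folding-model
    # config builds the nested EsmFoldConfig -> TrunkConfig -> StructureModuleConfig
    # chain from defaults when nothing is supplied.
    config = EsmConfig(vocab_size=33, is_folding_model=True)
    assert isinstance(config.esmfold_config.trunk.structure_module, StructureModuleConfig)
    assert config.to_dict()["esmfold_config"]["trunk"]["num_blocks"] == 48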
| 13 | 0 |