code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
def a__ ( A__, A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = len(A__ ), len(grid[0] )
if (
min(A__, A__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
SCREAMING_SNAKE_CASE_ : Dict = 0
count += depth_first_search(A__, row + 1, A__, A__ )
count += depth_first_search(A__, row - 1, A__, A__ )
count += depth_first_search(A__, A__, col + 1, A__ )
count += depth_first_search(A__, A__, col - 1, A__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 0 |
"""simple docstring"""
__magic_name__ : dict[tuple[int, int, int], int] = {}
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCamelCase : Any = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCamelCase : Dict = _calculate(days - 1 , SCREAMING_SNAKE_CASE , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCamelCase : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCamelCase : Dict = _calculate(days - 1 , SCREAMING_SNAKE_CASE , 0 )
UpperCamelCase : Optional[int] = state_late + state_absent + state_ontime
UpperCamelCase : Any = prizestrings
return prizestrings
def UpperCamelCase (SCREAMING_SNAKE_CASE = 30 ):
return _calculate(SCREAMING_SNAKE_CASE , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 102 |
import baseaa
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def snake_case ( lowerCAmelCase_ ) -> Optional[Any]:
if is_torch_version('''<''' , '''2.0.0''' ) or not hasattr(lowerCAmelCase_ , '''_dynamo''' ):
return False
return isinstance(lowerCAmelCase_ , torch._dynamo.eval_frame.OptimizedModule )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = True ) -> Dict:
_snake_case = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
_snake_case = is_compiled_module(lowerCAmelCase_ )
if is_compiled:
_snake_case = model
_snake_case = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = model.module
if not keep_fpaa_wrapper:
_snake_case = getattr(lowerCAmelCase_ , '''forward''' )
_snake_case = model.__dict__.pop('''_original_forward''' , lowerCAmelCase_ )
if original_forward is not None:
while hasattr(lowerCAmelCase_ , '''__wrapped__''' ):
_snake_case = forward.__wrapped__
if forward == original_forward:
break
_snake_case = forward
if getattr(lowerCAmelCase_ , '''_converted_to_transformer_engine''' , lowerCAmelCase_ ):
convert_model(lowerCAmelCase_ , to_transformer_engine=lowerCAmelCase_ )
if is_compiled:
_snake_case = model
_snake_case = compiled_model
return model
def snake_case ( ) -> str:
PartialState().wait_for_everyone()
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(lowerCAmelCase_ , lowerCAmelCase_ )
elif PartialState().local_process_index == 0:
torch.save(lowerCAmelCase_ , lowerCAmelCase_ )
@contextmanager
def snake_case ( **lowerCAmelCase_ ) -> str:
for key, value in kwargs.items():
_snake_case = str(lowerCAmelCase_ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def snake_case ( lowerCAmelCase_ ) -> Tuple:
if not hasattr(lowerCAmelCase_ , '''__qualname__''' ) and not hasattr(lowerCAmelCase_ , '''__name__''' ):
_snake_case = getattr(lowerCAmelCase_ , '''__class__''' , lowerCAmelCase_ )
if hasattr(lowerCAmelCase_ , '''__qualname__''' ):
return obj.__qualname__
if hasattr(lowerCAmelCase_ , '''__name__''' ):
return obj.__name__
return str(lowerCAmelCase_ )
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
for key, value in source.items():
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case = destination.setdefault(lowerCAmelCase_ , {} )
merge_dicts(lowerCAmelCase_ , lowerCAmelCase_ )
else:
_snake_case = value
return destination
def snake_case ( lowerCAmelCase_ = None ) -> bool:
if port is None:
_snake_case = 29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('''localhost''', port) ) == 0
| 103 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase__ ( _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case__ ( self ) -> Tuple:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case__ ( self ) -> Any:
A__ = ort.SessionOptions()
A__ = False
return options
def snake_case__ ( self ) -> Union[str, Any]:
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
A__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A__ = "A red cat sitting on a park bench"
A__ = np.random.RandomState(0 )
A__ = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=10 , generator=SCREAMING_SNAKE_CASE__ , output_type="np" , )
A__ = output.images
A__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
A__ = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case__ ( self ) -> str:
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo.png" )
A__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
A__ = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
A__ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
A__ = "A red cat sitting on a park bench"
A__ = np.random.RandomState(0 )
A__ = pipe(
prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , mask_image=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , num_inference_steps=20 , generator=SCREAMING_SNAKE_CASE__ , output_type="np" , )
A__ = output.images
A__ = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
A__ = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 104 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase__ : Optional[int] = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Tuple = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 105 |
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
from math import ceil
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
A = list(range(0 , lowerCAmelCase__ ) )
A = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
A = []
for i in device_map_blocks:
if device_map_blocks.count(lowerCAmelCase__ ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(lowerCAmelCase__ )
# Missing blocks
A = [i for i in blocks if i not in device_map_blocks]
A = [i for i in device_map_blocks if i not in blocks]
if len(lowerCAmelCase__ ) != 0:
raise ValueError(
'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
' These attention blocks were specified more than once: ' + str(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != 0:
raise ValueError(
'There are attention blocks for this model that are not specified in the device_map. Add these attention '
'blocks to a device on the device_map: ' + str(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != 0:
raise ValueError(
'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
+ str(lowerCAmelCase__ ) )
def lowerCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str ) -> Any:
'''simple docstring'''
A = list(range(lowerCAmelCase__ ) )
A = int(ceil(n_layers / len(lowerCAmelCase__ ) ) )
A = [layers[i : i + n_blocks] for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ )]
return dict(zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) | 106 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict=7, UpperCamelCase__ : str=3, UpperCamelCase__ : List[Any]=30, UpperCamelCase__ : List[str]=4_00, UpperCamelCase__ : str=True, UpperCamelCase__ : Dict=None, UpperCamelCase__ : Union[str, Any]=0.9, UpperCamelCase__ : List[Any]=None, UpperCamelCase__ : Union[str, Any]=True, UpperCamelCase__ : Any=[0.5, 0.5, 0.5], UpperCamelCase__ : List[str]=[0.5, 0.5, 0.5], ) -> Tuple:
_A = size if size is not None else {'shortest_edge': 30}
_A = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize_and_center_crop
_A = size
_A = crop_pct
_A = crop_size
_A = do_normalize
_A = image_mean
_A = image_std
def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class lowercase_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
_A = PoolFormerImageProcessingTester(self )
@property
def __UpperCAmelCase ( self : Dict ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__, 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'size' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'crop_pct' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase__, 'image_std' ) )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30} )
_A = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84} )
def __UpperCAmelCase ( self : Optional[int] ) -> str:
pass
def __UpperCAmelCase ( self : int ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def __UpperCAmelCase ( self : str ) -> Optional[Any]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def __UpperCAmelCase ( self : str ) -> Optional[int]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester, equal_resolution=UpperCamelCase__, torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__, torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
_A = image_processing(UpperCamelCase__, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
| 107 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 0 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
__a: Dict = 0B1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
__a: List[Any] = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase = WATERMARK_BITS
_UpperCAmelCase = WatermarkEncoder()
self.encoder.set_watermark("""bits""" , self.watermark )
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : torch.FloatTensor ) -> Tuple:
"""simple docstring"""
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
_UpperCAmelCase = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_UpperCAmelCase = [self.encoder.encode(lowerCamelCase , """dwtDct""" ) for image in images]
_UpperCAmelCase = torch.from_numpy(np.array(lowerCamelCase ) ).permute(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images | 108 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a ( lowercase ):
UpperCamelCase : Tuple = """naver-clova-ix/donut-base-finetuned-docvqa"""
UpperCamelCase : int = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
UpperCamelCase : Optional[Any] = """document_qa"""
UpperCamelCase : List[Any] = AutoProcessor
UpperCamelCase : int = VisionEncoderDecoderModel
UpperCamelCase : str = ["""image""", """text"""]
UpperCamelCase : Tuple = ["""text"""]
def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
UpperCAmelCase__ : int = task_prompt.replace('{user_input}' , UpperCamelCase_ )
UpperCAmelCase__ : Tuple = self.pre_processor.tokenizer(
UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='pt' ).input_ids
UpperCAmelCase__ : List[Any] = self.pre_processor(UpperCamelCase_ , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __snake_case ( self , UpperCamelCase_ ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=UpperCamelCase_ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=UpperCamelCase_ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=UpperCamelCase_ , ).sequences
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = self.pre_processor.batch_decode(UpperCamelCase_ )[0]
UpperCAmelCase__ : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
UpperCAmelCase__ : Optional[int] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
UpperCAmelCase__ : Tuple = re.sub(R'<.*?>' , '' , UpperCamelCase_ , count=1 ).strip() # remove first task start token
UpperCAmelCase__ : Any = self.pre_processor.tokenajson(UpperCamelCase_ )
return sequence["answer"]
| 110 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 0 |
# Function to print upper half of diamond (pyramid)
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Any:
'''simple docstring'''
for i in range(0 , SCREAMING_SNAKE_CASE_ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
for i in range(SCREAMING_SNAKE_CASE_ , 0 , -1 ):
for _ in range(SCREAMING_SNAKE_CASE_ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(SCREAMING_SNAKE_CASE_ ) # upper half
reverse_floyd(SCREAMING_SNAKE_CASE_ ) # lower half
if __name__ == "__main__":
print(R"| /\ | |- | |- |--| |\ /| |-")
print(R"|/ \| |- |_ |_ |__| | \/ | |_")
__A : List[Any] = 1
while K:
__A : Optional[int] = int(input("enter the number and , and see the magic : "))
print()
pretty_print(user_number)
__A : Any = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 130 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 0 |
"""simple docstring"""
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(SCREAMING_SNAKE_CASE_ ) )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if index == len(SCREAMING_SNAKE_CASE_ ):
return True
# Recursive Step
for i in range(SCREAMING_SNAKE_CASE_ ):
if valid_coloring(graph[index] , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
# Color current vertex
UpperCAmelCase = i
# Validate coloring
if util_color(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , index + 1 ):
return True
# Backtrack
UpperCAmelCase = -1
return False
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = [-1] * len(SCREAMING_SNAKE_CASE_ )
if util_color(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 0 ):
return colored_vertices
return []
| 341 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCAmelCase : int = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[Any] = ['''MobileNetV2FeatureExtractor''']
_UpperCAmelCase : Optional[int] = ['''MobileNetV2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[Any] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
_UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
_lowercase: str = TypeVar('''T''')
_lowercase: Optional[int] = Union[List[T], Tuple[T, ...]]
_lowercase: Optional[int] = Union[T, List[T], Dict[str, T]]
_lowercase: List[str] = Union[str, bytes, os.PathLike]
| 192 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( __UpperCamelCase ) -> int:
lowerCamelCase_ ,lowerCamelCase_ = emb.weight.shape
lowerCamelCase_ = nn.Linear(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,bias=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = emb.weight.data
return lin_layer
def _UpperCamelCase ( __UpperCamelCase ) -> Any:
lowerCamelCase_ = torch.load(SCREAMING_SNAKE_CASE_ ,map_location='cpu' )
lowerCamelCase_ = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCamelCase_ = mam_aaa['model']
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCamelCase_ = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ ,max_position_embeddings=10_24 ,encoder_layers=args.encoder_layers ,decoder_layers=args.decoder_layers ,encoder_attention_heads=args.encoder_attention_heads ,decoder_attention_heads=args.decoder_attention_heads ,encoder_ffn_dim=args.encoder_ffn_embed_dim ,decoder_ffn_dim=args.decoder_ffn_embed_dim ,d_model=args.encoder_embed_dim ,encoder_layerdrop=args.encoder_layerdrop ,decoder_layerdrop=args.decoder_layerdrop ,dropout=args.dropout ,attention_dropout=args.attention_dropout ,activation_dropout=args.activation_dropout ,activation_function='relu' ,)
lowerCamelCase_ = state_dict['decoder.embed_tokens.weight']
lowerCamelCase_ = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ ,strict=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
A_ = parser.parse_args()
A_ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 42 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
snake_case : Tuple = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Any = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
snake_case : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase__: Dict = "\\n Text data.\n Second line of data."
lowerCAmelCase__: str = "file"
@pytest.fixture(scope='session' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Dict:
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
SCREAMING_SNAKE_CASE_ : Any = bytes(SCREAMING_SNAKE_CASE_ , 'utf-8' )
with zstd.open(SCREAMING_SNAKE_CASE_ , 'wb' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return path
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Optional[Any]:
with open(os.path.join(tmpfs.local_root_dir , SCREAMING_SNAKE_CASE_ ) , 'w' ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
SCREAMING_SNAKE_CASE_ : Tuple = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_paths[compression_format]
SCREAMING_SNAKE_CASE_ : Any = tmp_path / 'cache'
SCREAMING_SNAKE_CASE_ : Any = DownloadConfig(cache_dir=SCREAMING_SNAKE_CASE_ , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
with open(SCREAMING_SNAKE_CASE_ ) as f:
SCREAMING_SNAKE_CASE_ : Tuple = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Tuple = 'custom_cache'
SCREAMING_SNAKE_CASE_ : List[str] = 'custom_extracted_dir'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / 'custom_extracted_path'
if default_extracted:
SCREAMING_SNAKE_CASE_ : str = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , SCREAMING_SNAKE_CASE_ )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE_ : Dict = xz_file
SCREAMING_SNAKE_CASE_ : Optional[int] = (
DownloadConfig(extract_compressed_file=SCREAMING_SNAKE_CASE_ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE_ )
)
SCREAMING_SNAKE_CASE_ : List[str] = cached_path(SCREAMING_SNAKE_CASE_ , download_config=SCREAMING_SNAKE_CASE_ )
assert Path(SCREAMING_SNAKE_CASE_ ).parent.parts[-2:] == expected
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : Tuple = str(Path(SCREAMING_SNAKE_CASE_ ).resolve() )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
# relative path
SCREAMING_SNAKE_CASE_ : Dict = str(Path(SCREAMING_SNAKE_CASE_ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(SCREAMING_SNAKE_CASE_ ) == text_file
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : List[str] = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
# relative path
SCREAMING_SNAKE_CASE_ : Optional[int] = './__missing_file__.txt'
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path(SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Any:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_from_cache(f'tmp://{tmpfs_file}' )
with open(SCREAMING_SNAKE_CASE_ ) as f:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( ) -> Any:
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> str:
SCREAMING_SNAKE_CASE_ : int = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_get('https://huggingface.co' , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : Optional[int] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_get('ftp://huggingface.co' , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE_ )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_get('s3://huggingface.co' , temp_file=SCREAMING_SNAKE_CASE_ )
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
fsspec_head('s3://huggingface.co' )
| 345 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self: str ) -> Optional[int]:
__magic_name__ : int = tempfile.mkdtemp()
__magic_name__ : List[str] = BlipImageProcessor()
__magic_name__ : int = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
__magic_name__ : Union[str, Any] = BlipProcessor(_UpperCamelCase , _UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Union[str, Any] , **__UpperCamelCase: Optional[int] ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ).tokenizer
def lowerCAmelCase__ ( self: Optional[int] , **__UpperCamelCase: List[str] ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCamelCase ).image_processor
def lowerCAmelCase__ ( self: int ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self: Dict ) -> Dict:
__magic_name__ : Union[str, Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__magic_name__ : str = [Image.fromarray(np.moveaxis(_UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[Any]:
__magic_name__ : Optional[int] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__magic_name__ : Optional[Any] = self.get_image_processor(do_normalize=_UpperCamelCase , padding_value=1.0 )
__magic_name__ : Optional[int] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCamelCase )
def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]:
__magic_name__ : str = self.get_image_processor()
__magic_name__ : List[Any] = self.get_tokenizer()
__magic_name__ : int = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
__magic_name__ : List[Any] = self.prepare_image_inputs()
__magic_name__ : Optional[Any] = image_processor(_UpperCamelCase , return_tensors="np" )
__magic_name__ : Any = processor(images=_UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
__magic_name__ : Tuple = self.get_image_processor()
__magic_name__ : Tuple = self.get_tokenizer()
__magic_name__ : List[Any] = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
__magic_name__ : int = "lower newer"
__magic_name__ : Any = processor(text=_UpperCamelCase )
__magic_name__ : str = tokenizer(_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self: Dict ) -> Optional[int]:
__magic_name__ : Optional[int] = self.get_image_processor()
__magic_name__ : List[Any] = self.get_tokenizer()
__magic_name__ : str = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
__magic_name__ : Optional[int] = "lower newer"
__magic_name__ : Any = self.prepare_image_inputs()
__magic_name__ : Union[str, Any] = processor(text=_UpperCamelCase , images=_UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(_UpperCamelCase ):
processor()
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
__magic_name__ : Any = self.get_image_processor()
__magic_name__ : int = self.get_tokenizer()
__magic_name__ : Optional[Any] = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
__magic_name__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ : int = processor.batch_decode(_UpperCamelCase )
__magic_name__ : int = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def lowerCAmelCase__ ( self: Any ) -> int:
__magic_name__ : List[Any] = self.get_image_processor()
__magic_name__ : int = self.get_tokenizer()
__magic_name__ : Any = BlipProcessor(tokenizer=_UpperCamelCase , image_processor=_UpperCamelCase )
__magic_name__ : Dict = "lower newer"
__magic_name__ : Optional[int] = self.prepare_image_inputs()
__magic_name__ : Dict = processor(text=_UpperCamelCase , images=_UpperCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] ) | 436 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self, A_, A_=7, A_=3, A_=18, A_=30, A_=400, A_=True, A_=None, A_=True, A_=None, A_=True, A_=[0.5, 0.5, 0.5], A_=[0.5, 0.5, 0.5], ) -> Any:
UpperCAmelCase__ =size if size is not None else {"shortest_edge": 18}
UpperCAmelCase__ =crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase__ =parent
UpperCAmelCase__ =batch_size
UpperCAmelCase__ =num_channels
UpperCAmelCase__ =image_size
UpperCAmelCase__ =min_resolution
UpperCAmelCase__ =max_resolution
UpperCAmelCase__ =do_resize
UpperCAmelCase__ =size
UpperCAmelCase__ =do_center_crop
UpperCAmelCase__ =crop_size
UpperCAmelCase__ =do_normalize
UpperCAmelCase__ =image_mean
UpperCAmelCase__ =image_std
def __UpperCAmelCase ( self ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case_ ( A__, unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = LevitImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =LevitImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase, "image_mean" ) )
self.assertTrue(hasattr(_UpperCamelCase, "image_std" ) )
self.assertTrue(hasattr(_UpperCamelCase, "do_normalize" ) )
self.assertTrue(hasattr(_UpperCamelCase, "do_resize" ) )
self.assertTrue(hasattr(_UpperCamelCase, "do_center_crop" ) )
self.assertTrue(hasattr(_UpperCamelCase, "size" ) )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase__ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18} )
UpperCAmelCase__ =self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84} )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
pass
def __UpperCAmelCase ( self ) -> List[str]:
# Initialize image_processing
UpperCAmelCase__ =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ =prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase, Image.Image )
# Test not batched input
UpperCAmelCase__ =image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCAmelCase__ =image_processing(_UpperCamelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def __UpperCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase__ =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ =prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCamelCase, numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase, np.ndarray )
# Test not batched input
UpperCAmelCase__ =image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCAmelCase__ =image_processing(_UpperCamelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def __UpperCAmelCase ( self ) -> Optional[Any]:
# Initialize image_processing
UpperCAmelCase__ =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ =prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCamelCase, torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase, torch.Tensor )
# Test not batched input
UpperCAmelCase__ =image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCAmelCase__ =image_processing(_UpperCamelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
| 625 |
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , a , a=13 , a=32 , a=3 , a=4 , a=[10, 20, 30, 40] , a=[2, 2, 3, 2] , a=True , a=True , a=37 , a="gelu" , a=10 , a=0.02 , a=["stage2", "stage3", "stage4"] , a=[2, 3, 4] , a=None , ):
"""simple docstring"""
snake_case_ :Tuple = parent
snake_case_ :Union[str, Any] = batch_size
snake_case_ :Any = image_size
snake_case_ :int = num_channels
snake_case_ :Optional[int] = num_stages
snake_case_ :Optional[int] = hidden_sizes
snake_case_ :str = depths
snake_case_ :List[Any] = is_training
snake_case_ :Any = use_labels
snake_case_ :List[str] = intermediate_size
snake_case_ :int = hidden_act
snake_case_ :Optional[int] = num_labels
snake_case_ :Union[str, Any] = initializer_range
snake_case_ :str = out_features
snake_case_ :Tuple = out_indices
snake_case_ :Optional[Any] = scope
def _a ( self ):
"""simple docstring"""
snake_case_ :str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ :Dict = None
if self.use_labels:
snake_case_ :Tuple = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ :Optional[int] = self.get_config()
return config, pixel_values, labels
def _a ( self ):
"""simple docstring"""
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _a ( self , a , a , a ):
"""simple docstring"""
snake_case_ :Optional[Any] = ConvNextVaModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ :List[str] = model(_UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _a ( self , a , a , a ):
"""simple docstring"""
snake_case_ :Any = ConvNextVaForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ :Optional[Any] = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , a , a , a ):
"""simple docstring"""
snake_case_ :Dict = ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ :Dict = model(_UpperCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
snake_case_ :Dict = None
snake_case_ :Optional[int] = ConvNextVaBackbone(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
snake_case_ :Optional[Any] = model(_UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _a ( self ):
"""simple docstring"""
snake_case_ :Any = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ :Tuple = config_and_inputs
snake_case_ :Any = {"pixel_values": pixel_values}
return config, inputs_dict
def _a ( self ):
"""simple docstring"""
snake_case_ :List[str] = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ :Optional[Any] = config_and_inputs
snake_case_ :List[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (A__ ,A__ ,unittest.TestCase ):
'''simple docstring'''
a__ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
a__ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
a__ = False
def _a ( self ):
"""simple docstring"""
snake_case_ :int = ConvNextVaModelTester(self )
snake_case_ :Union[str, Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def _a ( self ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ):
"""simple docstring"""
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _a ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _a ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _a ( self ):
"""simple docstring"""
pass
def _a ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ :List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ :str = True
if model_class.__name__ in [
*get_values(_UpperCamelCase ),
*get_values(_UpperCamelCase ),
]:
continue
snake_case_ :Any = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.train()
snake_case_ :Tuple = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
snake_case_ :Optional[Any] = model(**_UpperCamelCase ).loss
loss.backward()
def _a ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
snake_case_ , snake_case_ :str = self.model_tester.prepare_config_and_inputs_with_labels()
snake_case_ :Tuple = False
snake_case_ :List[str] = True
if (
model_class.__name__
in [*get_values(_UpperCamelCase ), *get_values(_UpperCamelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case_ :Optional[int] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
snake_case_ :int = self._prepare_for_class(_UpperCamelCase , _UpperCamelCase , return_labels=_UpperCamelCase )
snake_case_ :Any = model(**_UpperCamelCase ).loss
loss.backward()
def _a ( self ):
"""simple docstring"""
snake_case_ , snake_case_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :List[Any] = model_class(_UpperCamelCase )
snake_case_ :Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ :Tuple = [*signature.parameters.keys()]
snake_case_ :List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def _a ( self ):
"""simple docstring"""
snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _a ( self ):
"""simple docstring"""
def check_hidden_states_output(a , a , a ):
snake_case_ :Optional[Any] = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
snake_case_ :Any = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ :str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ :Dict = self.model_tester.num_stages
self.assertEqual(len(_UpperCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ , snake_case_ :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ :Union[str, Any] = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ :Optional[Any] = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _a ( self ):
"""simple docstring"""
snake_case_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
@slow
def _a ( self ):
"""simple docstring"""
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ :Union[str, Any] = ConvNextVaModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def A ( ):
"""simple docstring"""
snake_case_ :Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _a ( self ):
"""simple docstring"""
snake_case_ :Optional[int] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(_UpperCamelCase )
snake_case_ :Dict = self.default_image_processor
snake_case_ :Optional[Any] = prepare_img()
snake_case_ :Optional[int] = preprocessor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
snake_case_ :Optional[Any] = model(**_UpperCamelCase )
# verify the logits
snake_case_ :Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
snake_case_ :Dict = torch.tensor([0.9996, 0.1966, -0.4386] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
| 584 |
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_:Optional[Any] = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_:str = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_:int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 662 |
UpperCAmelCase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = '''Morse code here!'''
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
UpperCAmelCase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE_ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
assert _test_patching.open is open
UpperCAmelCase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , SCREAMING_SNAKE_CASE_ ):
pass
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , SCREAMING_SNAKE_CASE_ ) is None
with patch_submodule(_test_patching , '''len''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = '''__test_patch_submodule_start_and_stop_mock__'''
UpperCAmelCase = patch_submodule(_test_patching , '''open''' , SCREAMING_SNAKE_CASE_ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
UpperCAmelCase = '''__test_patch_submodule_successive_join__'''
UpperCAmelCase = '''__test_patch_submodule_successive_dirname__'''
UpperCAmelCase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.rename''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.path.join''' , SCREAMING_SNAKE_CASE_ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , SCREAMING_SNAKE_CASE_ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , SCREAMING_SNAKE_CASE_ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , SCREAMING_SNAKE_CASE_ ):
pass
| 130 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _a ( _snake_case ):
"""simple docstring"""
def decorator(_snake_case ):
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def _a ( *_snake_case ):
"""simple docstring"""
def decorator(_snake_case ):
UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , """handle_key""" , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class lowerCamelCase__ ( A__ ):
def __new__( cls ,A ,A ,A ):
UpperCAmelCase = super().__new__(cls ,_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
if not hasattr(_UpperCamelCase ,"""key_handler""" ):
setattr(_UpperCamelCase ,"""key_handler""" ,{} )
setattr(_UpperCamelCase ,"""handle_input""" ,KeyHandler.handle_input )
for value in attrs.values():
UpperCAmelCase = getattr(_UpperCamelCase ,"""handle_key""" ,[] )
for key in handled_keys:
UpperCAmelCase = value
return new_cls
@staticmethod
def _UpperCamelCase ( cls ):
UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
UpperCAmelCase = ord(_UpperCamelCase )
UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
UpperCAmelCase = char
return handler(cls )
else:
return None
def _a ( cls ):
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 341 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 0 |
'''simple docstring'''
import heapq
import sys
import numpy as np
_UpperCAmelCase : Optional[Any] = tuple[int, int]
class __magic_name__ :
def __init__( self ):
lowercase =[]
lowercase =set()
def _A( self ):
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
def _A( self ):
return len(self.elements ) == 0
def _A( self , snake_case_ , snake_case_ ):
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(_UpperCamelCase )
else:
# update
# print("update", item)
lowercase =[]
((lowercase) , (lowercase)) =heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((lowercase) , (lowercase)) =heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def _A( self , snake_case_ ):
if item in self.set:
self.set.remove(_UpperCamelCase )
lowercase =[]
((lowercase) , (lowercase)) =heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((lowercase) , (lowercase)) =heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def _A( self ):
return self.elements[0][1]
def _A( self ):
((lowercase) , (lowercase)) =heapq.heappop(self.elements )
self.set.remove(_UpperCamelCase )
return (priority, item)
def UpperCamelCase ( lowercase_ : TPos , lowercase_ : TPos ) -> Any:
'''simple docstring'''
lowercase =np.array(SCREAMING_SNAKE_CASE_ )
lowercase =np.array(SCREAMING_SNAKE_CASE_ )
return np.linalg.norm(a - b )
def UpperCamelCase ( lowercase_ : TPos , lowercase_ : TPos ) -> int:
'''simple docstring'''
return consistent_heuristic(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) // t
def UpperCamelCase ( lowercase_ : TPos , lowercase_ : TPos ) -> Tuple:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def UpperCamelCase ( lowercase_ : TPos , lowercase_ : int , lowercase_ : TPos , lowercase_ : dict[TPos, float] ) -> str:
'''simple docstring'''
lowercase =g_function[start] + Wa * heuristics[i](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return ans
def UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Dict ) -> Dict:
'''simple docstring'''
lowercase =np.chararray((n, n) )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
lowercase ='''*'''
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if (j, (n - 1) - i) in blocks:
lowercase ='''#'''
lowercase ='''-'''
lowercase =back_pointer[goal]
while x != start:
((lowercase) , (lowercase)) =x
# print(x)
lowercase ='''-'''
lowercase =back_pointer[x]
lowercase ='''-'''
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
lowercase =back_pointer[goal]
while x != start:
print(SCREAMING_SNAKE_CASE_ , end=''' ''' )
lowercase =back_pointer[x]
print(SCREAMING_SNAKE_CASE_ )
sys.exit()
def UpperCamelCase ( lowercase_ : TPos ) -> Tuple:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def UpperCamelCase ( lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : List[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(SCREAMING_SNAKE_CASE_ ):
open_list[itera].remove_element(SCREAMING_SNAKE_CASE_ )
# print("s", s)
# print("j", j)
((lowercase) , (lowercase)) =s
lowercase =(x - 1, y)
lowercase =(x + 1, y)
lowercase =(x, y + 1)
lowercase =(x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(SCREAMING_SNAKE_CASE_ ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(SCREAMING_SNAKE_CASE_ )
lowercase =-1
lowercase =float('''inf''' )
if valid(SCREAMING_SNAKE_CASE_ ) and g_function[neighbours] > g_function[s] + 1:
lowercase =g_function[s] + 1
lowercase =s
if neighbours not in close_list_anchor:
open_list[0].put(SCREAMING_SNAKE_CASE_ , key(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
if neighbours not in close_list_inad:
for var in range(1 , SCREAMING_SNAKE_CASE_ ):
if key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) <= Wa * key(
SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
open_list[j].put(
SCREAMING_SNAKE_CASE_ , key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def UpperCamelCase ( ) -> Any:
'''simple docstring'''
lowercase =[]
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(1_5 , 2_0 ):
some_list.append((x, 1_7) )
for x in range(1_0 , 1_9 ):
for y in range(1 , 1_5 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(1_2 , 1_9 ):
some_list.append((x, y) )
for x in range(3 , 1_3 ):
for y in range(1_6 , 1_9 ):
some_list.append((x, y) )
return some_list
_UpperCAmelCase : int = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
_UpperCAmelCase : Tuple = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
_UpperCAmelCase : Dict = make_common_ground()
_UpperCAmelCase : Any = blocks_blk
# hyper parameters
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : Optional[int] = 20
_UpperCAmelCase : List[Any] = 3 # one consistent and two other inconsistent
# start and end destination
_UpperCAmelCase : Dict = (0, 0)
_UpperCAmelCase : Optional[int] = (n - 1, n - 1)
_UpperCAmelCase : str = 1
def UpperCamelCase ( lowercase_ : TPos , lowercase_ : TPos , lowercase_ : int ) -> Union[str, Any]:
'''simple docstring'''
lowercase ={start: 0, goal: float('''inf''' )}
lowercase ={start: -1, goal: -1}
lowercase =[]
lowercase =set()
for i in range(SCREAMING_SNAKE_CASE_ ):
open_list.append(PriorityQueue() )
open_list[i].put(SCREAMING_SNAKE_CASE_ , key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowercase =[]
lowercase =[]
while open_list[0].minkey() < float('''inf''' ):
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('''inf''' ):
do_something(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
lowercase , lowercase =open_list[i].top_show()
visited.add(SCREAMING_SNAKE_CASE_ )
expand_state(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
close_list_inad.append(SCREAMING_SNAKE_CASE_ )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('''inf''' ):
do_something(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
lowercase =open_list[0].top_show()
visited.add(SCREAMING_SNAKE_CASE_ )
expand_state(
SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
close_list_anchor.append(SCREAMING_SNAKE_CASE_ )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 72 |
import baseaa
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: str = logging.get_logger(__name__)
_lowercase: List[Any] = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class lowerCamelCase__ ( A__ ):
UpperCamelCase__ ="""lilt"""
def __init__( self : Optional[int] , lowercase__ : List[str]=3_05_22 , lowercase__ : Tuple=7_68 , lowercase__ : str=12 , lowercase__ : Any=12 , lowercase__ : Tuple=30_72 , lowercase__ : Union[str, Any]="gelu" , lowercase__ : str=0.1 , lowercase__ : Optional[Any]=0.1 , lowercase__ : str=5_12 , lowercase__ : Union[str, Any]=2 , lowercase__ : Union[str, Any]=0.0_2 , lowercase__ : List[Any]=1e-12 , lowercase__ : Union[str, Any]=0 , lowercase__ : Optional[Any]="absolute" , lowercase__ : Optional[int]=None , lowercase__ : str=4 , lowercase__ : Tuple=10_24 , **lowercase__ : Any , ):
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = position_embedding_type
_lowerCAmelCase = classifier_dropout
_lowerCAmelCase = channel_shrink_ratio
_lowerCAmelCase = max_ad_position_embeddings
| 192 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import numpy as np
import datasets
A_ = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
A_ = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
A_ = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ),
} ) , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
lowerCamelCase_ = np.array(_UpperCamelCase )
lowerCamelCase_ = np.array(_UpperCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('Expected `X` to be a 2D vector' )
if len(reference_distribution.shape ) != 2:
raise ValueError('Expected `reference_distribution` to be a 2D vector' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' )
# Get mahalanobis distance for each prediction
lowerCamelCase_ = X - np.mean(_UpperCamelCase )
lowerCamelCase_ = np.cov(reference_distribution.T )
try:
lowerCamelCase_ = np.linalg.inv(_UpperCamelCase )
except np.linalg.LinAlgError:
lowerCamelCase_ = np.linalg.pinv(_UpperCamelCase )
lowerCamelCase_ = np.dot(_UpperCamelCase , _UpperCamelCase )
lowerCamelCase_ = np.dot(_UpperCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 42 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
snake_case : str = logging.get_logger(__name__)
# General docstring
snake_case : str = '''RegNetConfig'''
# Base docstring
snake_case : Tuple = '''facebook/regnet-y-040'''
snake_case : Optional[Any] = [1, 10_88, 7, 7]
# Image classification docstring
snake_case : Dict = '''facebook/regnet-y-040'''
snake_case : Tuple = '''tabby, tabby cat'''
snake_case : Optional[int] = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class snake_case_ (nn.Module ):
def __init__( self :Any ,__snake_case :str ,__snake_case :List[Any] ,__snake_case :List[str] = 3 ,__snake_case :Tuple = 1 ,__snake_case :List[str] = 1 ,__snake_case :str = "relu" ,) -> List[Any]:
super().__init__()
a__ = nn.Convad(
_UpperCamelCase ,_UpperCamelCase ,kernel_size=_UpperCamelCase ,stride=_UpperCamelCase ,padding=kernel_size // 2 ,groups=_UpperCamelCase ,bias=_UpperCamelCase ,)
a__ = nn.BatchNormad(_UpperCamelCase )
a__ = ACTaFN[activation] if activation is not None else nn.Identity()
def lowerCamelCase__( self :Optional[Any] ,__snake_case :str ) -> str:
a__ = self.convolution(_UpperCamelCase )
a__ = self.normalization(_UpperCamelCase )
a__ = self.activation(_UpperCamelCase )
return hidden_state
class snake_case_ (nn.Module ):
def __init__( self :Tuple ,__snake_case :List[str] ) -> Tuple:
super().__init__()
a__ = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
a__ = config.num_channels
def lowerCamelCase__( self :List[Any] ,__snake_case :Any ) -> Optional[Any]:
a__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
a__ = self.embedder(_UpperCamelCase )
return hidden_state
class snake_case_ (nn.Module ):
def __init__( self :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :Optional[int] = 2 ) -> List[str]:
super().__init__()
a__ = nn.Convad(_UpperCamelCase ,_UpperCamelCase ,kernel_size=1 ,stride=_UpperCamelCase ,bias=_UpperCamelCase )
a__ = nn.BatchNormad(_UpperCamelCase )
def lowerCamelCase__( self :Any ,__snake_case :str ) -> Any:
a__ = self.convolution(_UpperCamelCase )
a__ = self.normalization(_UpperCamelCase )
return hidden_state
class snake_case_ (nn.Module ):
def __init__( self :Union[str, Any] ,__snake_case :Dict ,__snake_case :Optional[int] ) -> List[str]:
super().__init__()
a__ = nn.AdaptiveAvgPoolad((1, 1) )
a__ = nn.Sequential(
nn.Convad(_UpperCamelCase ,_UpperCamelCase ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_UpperCamelCase ,_UpperCamelCase ,kernel_size=1 ) ,nn.Sigmoid() ,)
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[Any] ) -> Optional[int]:
# b c h w -> b c 1 1
a__ = self.pooler(_UpperCamelCase )
a__ = self.attention(_UpperCamelCase )
a__ = hidden_state * attention
return hidden_state
class snake_case_ (nn.Module ):
def __init__( self :Optional[Any] ,__snake_case :Optional[Any] ,__snake_case :List[str] ,__snake_case :int ,__snake_case :str = 1 ) -> Tuple:
super().__init__()
a__ = in_channels != out_channels or stride != 1
a__ = max(1 ,out_channels // config.groups_width )
a__ = (
RegNetShortCut(_UpperCamelCase ,_UpperCamelCase ,stride=_UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
a__ = nn.Sequential(
RegNetConvLayer(_UpperCamelCase ,_UpperCamelCase ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_UpperCamelCase ,_UpperCamelCase ,stride=_UpperCamelCase ,groups=_UpperCamelCase ,activation=config.hidden_act ) ,RegNetConvLayer(_UpperCamelCase ,_UpperCamelCase ,kernel_size=1 ,activation=_UpperCamelCase ) ,)
a__ = ACTaFN[config.hidden_act]
def lowerCamelCase__( self :Optional[Any] ,__snake_case :List[Any] ) -> List[str]:
a__ = hidden_state
a__ = self.layer(_UpperCamelCase )
a__ = self.shortcut(_UpperCamelCase )
hidden_state += residual
a__ = self.activation(_UpperCamelCase )
return hidden_state
class snake_case_ (nn.Module ):
def __init__( self :str ,__snake_case :Any ,__snake_case :Optional[Any] ,__snake_case :List[str] ,__snake_case :Optional[int] = 1 ) -> Dict:
super().__init__()
a__ = in_channels != out_channels or stride != 1
a__ = max(1 ,out_channels // config.groups_width )
a__ = (
RegNetShortCut(_UpperCamelCase ,_UpperCamelCase ,stride=_UpperCamelCase ) if should_apply_shortcut else nn.Identity()
)
a__ = nn.Sequential(
RegNetConvLayer(_UpperCamelCase ,_UpperCamelCase ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_UpperCamelCase ,_UpperCamelCase ,stride=_UpperCamelCase ,groups=_UpperCamelCase ,activation=config.hidden_act ) ,RegNetSELayer(_UpperCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_UpperCamelCase ,_UpperCamelCase ,kernel_size=1 ,activation=_UpperCamelCase ) ,)
a__ = ACTaFN[config.hidden_act]
def lowerCamelCase__( self :Any ,__snake_case :Optional[int] ) -> Optional[int]:
a__ = hidden_state
a__ = self.layer(_UpperCamelCase )
a__ = self.shortcut(_UpperCamelCase )
hidden_state += residual
a__ = self.activation(_UpperCamelCase )
return hidden_state
class snake_case_ (nn.Module ):
def __init__( self :List[Any] ,__snake_case :Tuple ,__snake_case :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] = 2 ,__snake_case :List[Any] = 2 ,) -> List[Any]:
super().__init__()
a__ = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
a__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,stride=_UpperCamelCase ,) ,*[layer(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ) for _ in range(depth - 1 )] ,)
def lowerCamelCase__( self :Dict ,__snake_case :Tuple ) -> Tuple:
a__ = self.layers(_UpperCamelCase )
return hidden_state
class snake_case_ (nn.Module ):
def __init__( self :Optional[int] ,__snake_case :Tuple ) -> List[Any]:
super().__init__()
a__ = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_UpperCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
a__ = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCamelCase ,config.depths[1:] ):
self.stages.append(RegNetStage(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,depth=_UpperCamelCase ) )
def lowerCamelCase__( self :Any ,__snake_case :Union[str, Any] ,__snake_case :Optional[int] = False ,__snake_case :List[str] = True ) -> Optional[Any]:
a__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
a__ = hidden_states + (hidden_state,)
a__ = stage_module(_UpperCamelCase )
if output_hidden_states:
a__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCamelCase ,hidden_states=_UpperCamelCase )
class snake_case_ (A__ ):
UpperCAmelCase__ : List[str] = RegNetConfig
UpperCAmelCase__ : Any = """regnet"""
UpperCAmelCase__ : Optional[int] = """pixel_values"""
UpperCAmelCase__ : Any = True
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :List[Any] ) -> Any:
if isinstance(_UpperCamelCase ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' )
elif isinstance(_UpperCamelCase ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def lowerCamelCase__( self :Any ,__snake_case :Optional[int] ,__snake_case :str=False ) -> int:
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
a__ = value
snake_case : List[str] = r'''\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'''
snake_case : Any = r'''\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'''
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , A__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class snake_case_ (A__ ):
def __init__( self :int ,__snake_case :List[str] ) -> Tuple:
super().__init__(_UpperCamelCase )
a__ = config
a__ = RegNetEmbeddings(_UpperCamelCase )
a__ = RegNetEncoder(_UpperCamelCase )
a__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_UpperCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def lowerCamelCase__( self :str ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any] = None ,__snake_case :List[str] = None ) -> List[Any]:
a__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
a__ = return_dict if return_dict is not None else self.config.use_return_dict
a__ = self.embedder(_UpperCamelCase )
a__ = self.encoder(
_UpperCamelCase ,output_hidden_states=_UpperCamelCase ,return_dict=_UpperCamelCase )
a__ = encoder_outputs[0]
a__ = self.pooler(_UpperCamelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCamelCase ,pooler_output=_UpperCamelCase ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , A__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class snake_case_ (A__ ):
def __init__( self :int ,__snake_case :Tuple ) -> List[Any]:
super().__init__(_UpperCamelCase )
a__ = config.num_labels
a__ = RegNetModel(_UpperCamelCase )
# classification head
a__ = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_UpperCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def lowerCamelCase__( self :Dict ,__snake_case :Tuple = None ,__snake_case :Optional[int] = None ,__snake_case :Optional[Any] = None ,__snake_case :Dict = None ,) -> Any:
a__ = return_dict if return_dict is not None else self.config.use_return_dict
a__ = self.regnet(_UpperCamelCase ,output_hidden_states=_UpperCamelCase ,return_dict=_UpperCamelCase )
a__ = outputs.pooler_output if return_dict else outputs[1]
a__ = self.classifier(_UpperCamelCase )
a__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
a__ = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
a__ = 'single_label_classification'
else:
a__ = 'multi_label_classification'
if self.config.problem_type == "regression":
a__ = MSELoss()
if self.num_labels == 1:
a__ = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
a__ = loss_fct(_UpperCamelCase ,_UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
a__ = CrossEntropyLoss()
a__ = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
a__ = BCEWithLogitsLoss()
a__ = loss_fct(_UpperCamelCase ,_UpperCamelCase )
if not return_dict:
a__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCamelCase ,logits=_UpperCamelCase ,hidden_states=outputs.hidden_states )
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
lowerCAmelCase__: Dict = logging.get_logger(__name__)
lowerCAmelCase__: Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowerCAmelCase__: Dict = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
lowerCAmelCase__: str = {
"facebook/blenderbot_small-90M": 512,
}
class snake_case_ ( A__ ):
__lowerCamelCase : List[str] = VOCAB_FILES_NAMES
__lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Optional[int] = BlenderbotSmallTokenizer
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase="<|endoftext|>" , __lowerCAmelCase=False , __lowerCAmelCase=True , **__lowerCAmelCase , ):
super().__init__(
ByteLevelBPETokenizer(
vocab=_UpperCamelCase , merges=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase , ) , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE_ : Optional[int] = add_prefix_space
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=None ):
SCREAMING_SNAKE_CASE_ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 345 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_SCREAMING_SNAKE_CASE : Dict = 16
_SCREAMING_SNAKE_CASE : Optional[int] = 32
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ = 16 ):
"""simple docstring"""
__magic_name__ : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
__magic_name__ : List[Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(UpperCamelCase__ ):
# max_length=None => use the model max length (it's actually the default)
__magic_name__ : Optional[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__magic_name__ : List[str] = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__magic_name__ : int = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(UpperCamelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__magic_name__ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__magic_name__ : Dict = 16
elif accelerator.mixed_precision != "no":
__magic_name__ : Dict = 8
else:
__magic_name__ : List[str] = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , )
# Instantiate dataloaders.
__magic_name__ : Optional[int] = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
__magic_name__ : str = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_SCREAMING_SNAKE_CASE : Dict = mocked_dataloaders # noqa: F811
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE_ ) == "1":
__magic_name__ : Union[str, Any] = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__magic_name__ : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
__magic_name__ : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__magic_name__ : Optional[Any] = config["lr"]
__magic_name__ : List[str] = int(config["num_epochs"] )
__magic_name__ : int = int(config["seed"] )
__magic_name__ : str = int(config["batch_size"] )
set_seed(SCREAMING_SNAKE_CASE_ )
__magic_name__ , __magic_name__ : List[str] = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__magic_name__ : Tuple = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
__magic_name__ : Any = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__magic_name__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
__magic_name__ : List[str] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__magic_name__ : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__magic_name__ : Any = model.to(accelerator.device )
# Instantiate optimizer
__magic_name__ : Optional[int] = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
__magic_name__ : List[Any] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__magic_name__ : Optional[Any] = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split("." )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__magic_name__ : List[str] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__magic_name__ : List[str] = model(**SCREAMING_SNAKE_CASE_ )
__magic_name__ : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__magic_name__ : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__magic_name__ : Dict = model(**SCREAMING_SNAKE_CASE_ )
__magic_name__ : int = outputs.logits.argmax(dim=-1 )
__magic_name__ , __magic_name__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
__magic_name__ : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
"epoch": epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=SCREAMING_SNAKE_CASE_ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
__magic_name__ : Dict = parser.parse_args()
__magic_name__ : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 436 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 0 |
def _UpperCAmelCase ( A = 1000 ):
'''simple docstring'''
UpperCAmelCase__ =3
UpperCAmelCase__ =0
while a < n:
if a % 3 == 0 or a % 5 == 0:
result += a
elif a % 15 == 0:
result -= a
a += 1
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 625 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
"""simple docstring"""
snake_case_ :Any = []
def _a ( self , a ):
"""simple docstring"""
return self.node_position[vertex]
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :str = pos
def _a ( self , a , a , a , a ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
snake_case_ :Optional[int] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
snake_case_ :Tuple = 2 * start + 1
else:
snake_case_ :str = 2 * start + 2
if heap[smallest_child] < heap[start]:
snake_case_ , snake_case_ :Optional[int] = heap[smallest_child], positions[smallest_child]
snake_case_ , snake_case_ :Optional[Any] = (
heap[start],
positions[start],
)
snake_case_ , snake_case_ :Tuple = temp, tempa
snake_case_ :Optional[int] = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _UpperCamelCase )
self.top_to_bottom(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def _a ( self , a , a , a , a ):
"""simple docstring"""
snake_case_ :Tuple = position[index]
while index != 0:
snake_case_ :Tuple = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
snake_case_ :int = heap[parent]
snake_case_ :List[str] = position[parent]
self.set_position(position[parent] , _UpperCamelCase )
else:
snake_case_ :int = val
snake_case_ :Any = temp
self.set_position(_UpperCamelCase , _UpperCamelCase )
break
snake_case_ :List[str] = parent
else:
snake_case_ :Dict = val
snake_case_ :Any = temp
self.set_position(_UpperCamelCase , 0 )
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :List[str] = len(_UpperCamelCase ) // 2 - 1
for i in range(_UpperCamelCase , -1 , -1 ):
self.top_to_bottom(_UpperCamelCase , _UpperCamelCase , len(_UpperCamelCase ) , _UpperCamelCase )
def _a ( self , a , a ):
"""simple docstring"""
snake_case_ :Optional[Any] = positions[0]
snake_case_ :Union[str, Any] = sys.maxsize
self.top_to_bottom(_UpperCamelCase , 0 , len(_UpperCamelCase ) , _UpperCamelCase )
return temp
def A ( _A ):
"""simple docstring"""
snake_case_ :List[Any] = Heap()
snake_case_ :List[Any] = [0] * len(SCREAMING_SNAKE_CASE_ )
snake_case_ :Any = [-1] * len(SCREAMING_SNAKE_CASE_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
snake_case_ :int = [] # Heap of Distance of vertices from their neighboring vertex
snake_case_ :Optional[int] = []
for vertex in range(len(SCREAMING_SNAKE_CASE_ ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE_ )
heap.node_position.append(SCREAMING_SNAKE_CASE_ )
snake_case_ :List[str] = []
snake_case_ :str = 1
snake_case_ :Union[str, Any] = sys.maxsize
for neighbor, distance in adjacency_list[0]:
snake_case_ :Tuple = 0
snake_case_ :Tuple = distance
heap.heapify(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
for _ in range(1, len(SCREAMING_SNAKE_CASE_ ) ):
snake_case_ :List[str] = heap.delete_minimum(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
snake_case_ :str = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE_ )]
):
snake_case_ :Dict = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE_, heap.get_position(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
snake_case_ :List[str] = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__UpperCAmelCase : Tuple = int(input('Enter number of edges: ').strip())
__UpperCAmelCase : Tuple = defaultdict(list)
for _ in range(edges_number):
__UpperCAmelCase : str = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 584 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
import doctest
from collections import deque
import numpy as np
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self ):
A : Any = [2, 1, 2, -1]
A : Union[str, Any] = [1, 2, 3, 4]
def _lowerCAmelCase ( self ):
A : Tuple = len(self.first_signal )
A : str = len(self.second_signal )
A : Union[str, Any] = max(_UpperCamelCase, _UpperCamelCase )
# create a zero matrix of max_length x max_length
A : int = [[0] * max_length for i in range(_UpperCamelCase )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(_UpperCamelCase ):
A : Tuple = deque(self.second_signal )
rotated_signal.rotate(_UpperCamelCase )
for j, item in enumerate(_UpperCamelCase ):
matrix[i][j] += item
# multiply the matrix with the first signal
A : Optional[int] = np.matmul(np.transpose(_UpperCamelCase ), np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(_UpperCamelCase, 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 662 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Tuple = logging.get_logger(__name__)
__A : Union[str, Any] = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class A_ (A__ ):
UpperCAmelCase__ = """bert"""
def __init__( self , _A=3_0_5_2_2 , _A=7_6_8 , _A=1_2 , _A=1_2 , _A=3_0_7_2 , _A="gelu" , _A=0.1 , _A=0.1 , _A=5_1_2 , _A=2 , _A=0.02 , _A=1E-12 , _A=0 , _A="absolute" , _A=True , _A=None , **_A , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
UpperCAmelCase = classifier_dropout
class A_ (A__ ):
@property
def _lowercase ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 130 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCamelCase__ :
def __init__( self ,A = None ):
if components is None:
UpperCAmelCase = []
UpperCAmelCase = list(_UpperCamelCase )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(_UpperCamelCase ,self.__components ) ) + ")"
def __add__( self ,A ):
UpperCAmelCase = len(self )
if size == len(_UpperCamelCase ):
UpperCAmelCase = [self.__components[i] + other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )]
return Vector(_UpperCamelCase )
else:
raise Exception("""must have the same size""" )
def __sub__( self ,A ):
UpperCAmelCase = len(self )
if size == len(_UpperCamelCase ):
UpperCAmelCase = [self.__components[i] - other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )]
return Vector(_UpperCamelCase )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
def __mul__( self ,A ):
if isinstance(_UpperCamelCase ,(float, int) ):
UpperCAmelCase = [c * other for c in self.__components]
return Vector(_UpperCamelCase )
elif isinstance(_UpperCamelCase ,_UpperCamelCase ) and len(self ) == len(_UpperCamelCase ):
UpperCAmelCase = len(self )
UpperCAmelCase = [self.__components[i] * other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )]
return sum(_UpperCamelCase )
else: # error case
raise Exception("""invalid operand!""" )
def _UpperCamelCase ( self ):
return Vector(self.__components )
def _UpperCamelCase ( self ,A ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def _UpperCamelCase ( self ,A ,A ):
assert -len(self.__components ) <= pos < len(self.__components )
UpperCAmelCase = value
def _UpperCamelCase ( self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
UpperCAmelCase = [c**2 for c in self.__components]
return math.sqrt(sum(_UpperCamelCase ) )
def _UpperCamelCase ( self ,A ,A = False ):
UpperCAmelCase = self * other
UpperCAmelCase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _a ( _snake_case ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return Vector([0] * dimension )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ))
UpperCAmelCase = [0] * dimension
UpperCAmelCase = 1
return Vector(SCREAMING_SNAKE_CASE_ )
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and (isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ))
)
return x * scalar + y
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
random.seed(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = [random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )]
return Vector(SCREAMING_SNAKE_CASE_ )
class lowerCamelCase__ :
def __init__( self ,A ,A ,A ):
UpperCAmelCase = matrix
UpperCAmelCase = w
UpperCAmelCase = h
def __str__( self ):
UpperCAmelCase = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self ,A ):
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] + other.component(_UpperCamelCase ,_UpperCamelCase )
for j in range(self.__width )
]
matrix.append(_UpperCamelCase )
return Matrix(_UpperCamelCase ,self.__width ,self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self ,A ):
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] - other.component(_UpperCamelCase ,_UpperCamelCase )
for j in range(self.__width )
]
matrix.append(_UpperCamelCase )
return Matrix(_UpperCamelCase ,self.__width ,self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
def __mul__( self ,A ):
if isinstance(_UpperCamelCase ,_UpperCamelCase ): # matrix-vector
if len(_UpperCamelCase ) == self.__width:
UpperCAmelCase = zero_vector(self.__height )
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] * other.component(_UpperCamelCase )
for j in range(self.__width )
]
ans.change_component(_UpperCamelCase ,sum(_UpperCamelCase ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(_UpperCamelCase ,(int, float) ): # matrix-scalar
UpperCAmelCase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_UpperCamelCase ,self.__width ,self.__height )
return None
def _UpperCamelCase ( self ):
return self.__height
def _UpperCamelCase ( self ):
return self.__width
def _UpperCamelCase ( self ,A ,A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def _UpperCamelCase ( self ,A ,A ,A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
UpperCAmelCase = value
else:
raise Exception("""change_component: indices out of bounds""" )
def _UpperCamelCase ( self ,A ,A ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
UpperCAmelCase = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_UpperCamelCase ) ):
UpperCAmelCase = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_UpperCamelCase ,self.__width - 1 ,self.__height - 1 ).determinant()
def _UpperCamelCase ( self ,A ,A ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_UpperCamelCase ,_UpperCamelCase )
else:
raise Exception("""Indices out of bounds""" )
def _UpperCamelCase ( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
UpperCAmelCase = [
self.__matrix[0][y] * self.cofactor(0 ,_UpperCamelCase ) for y in range(self.__width )
]
return sum(_UpperCamelCase )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = [[0] * n for _ in range(SCREAMING_SNAKE_CASE_ )]
return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
random.seed(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = [
[random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )
]
return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 341 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
def UpperCamelCase ( lowercase_ : int ) -> int:
'''simple docstring'''
lowercase =R'''\w+[.]\d+'''
lowercase =re.findall(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for pat in pats:
lowercase =key.replace(SCREAMING_SNAKE_CASE_ , '''_'''.join(pat.split('''.''' ) ) )
return key
def UpperCamelCase ( lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowercase =pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase =pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase =pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase =pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
lowercase =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase =pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase =pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def UpperCamelCase ( lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Dict=4_2 ) -> List[Any]:
'''simple docstring'''
lowercase ={k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase =flax_model.init_weights(PRNGKey(SCREAMING_SNAKE_CASE_ ) )
lowercase =flatten_dict(SCREAMING_SNAKE_CASE_ )
lowercase ={}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase =rename_key(SCREAMING_SNAKE_CASE_ )
lowercase =tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
lowercase , lowercase =rename_key_and_reshape_tensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# also add unexpected weight so that warning is thrown
lowercase =jnp.asarray(SCREAMING_SNAKE_CASE_ )
return unflatten_dict(SCREAMING_SNAKE_CASE_ )
| 72 |
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
from math import isclose, sqrt
def _lowerCamelCase ( snake_case , snake_case , snake_case ):
_lowerCAmelCase = point_y / 4 / point_x
_lowerCAmelCase = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
_lowerCAmelCase = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
_lowerCAmelCase = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
_lowerCAmelCase = outgoing_gradient**2 + 4
_lowerCAmelCase = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
_lowerCAmelCase = (point_y - outgoing_gradient * point_x) ** 2 - 100
_lowerCAmelCase = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
_lowerCAmelCase = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
_lowerCAmelCase = x_minus if isclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else x_plus
_lowerCAmelCase = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def _lowerCamelCase ( snake_case = 1.4 , snake_case = -9.6 ):
_lowerCAmelCase = 0
_lowerCAmelCase = first_x_coord
_lowerCAmelCase = first_y_coord
_lowerCAmelCase = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = next_point(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(f"""{solution() = }""")
| 192 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCAmelCase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = """levit"""
def __init__( self , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=[128, 256, 384] , SCREAMING_SNAKE_CASE_=[4, 8, 12] , SCREAMING_SNAKE_CASE_=[4, 4, 4] , SCREAMING_SNAKE_CASE_=[16, 16, 16] , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=[2, 2, 2] , SCREAMING_SNAKE_CASE_=0.02 , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
lowerCamelCase_ = image_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = kernel_size
lowerCamelCase_ = stride
lowerCamelCase_ = padding
lowerCamelCase_ = hidden_sizes
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = depths
lowerCamelCase_ = key_dim
lowerCamelCase_ = drop_path_rate
lowerCamelCase_ = patch_size
lowerCamelCase_ = attention_ratio
lowerCamelCase_ = mlp_ratio
lowerCamelCase_ = initializer_range
lowerCamelCase_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCAmelCase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = version.parse('1.11' )
@property
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
return 1E-4
| 42 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case : Any = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case : int = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class snake_case_ :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class snake_case_ :
def __init__( self :Dict ,__snake_case :List[str] ) -> List[str]:
a__ = None
for i in sorted(_UpperCamelCase ,reverse=_UpperCamelCase ):
a__ = Node(_UpperCamelCase ,self.head )
def __iter__( self :Tuple ) -> Any:
a__ = self.head
while node:
yield node.data
a__ = node.next_node
def __len__( self :List[str] ) -> Any:
return sum(1 for _ in self )
def __str__( self :Optional[int] ) -> str:
return " -> ".join([str(_UpperCamelCase ) for node in self] )
def __lowercase ( __lowerCAmelCase : SortedLinkedList , __lowerCAmelCase : SortedLinkedList ):
return SortedLinkedList(list(SCREAMING_SNAKE_CASE_ ) + list(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case : Dict = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __SCREAMING_SNAKE_CASE ( ) -> None:
print('Making key files...' )
make_key_files('rsa' , 1024 )
print('Key files generation successful.' )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> tuple[tuple[int, int], tuple[int, int]]:
print('Generating prime p...' )
SCREAMING_SNAKE_CASE_ : Optional[int] = rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE_ )
print('Generating prime q...' )
SCREAMING_SNAKE_CASE_ : Tuple = rabinMiller.generate_large_prime(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Any = p * q
print('Generating e that is relatively prime to (p - 1) * (q - 1)...' )
while True:
SCREAMING_SNAKE_CASE_ : str = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(SCREAMING_SNAKE_CASE_ , (p - 1) * (q - 1) ) == 1:
break
print('Calculating d that is mod inverse of e...' )
SCREAMING_SNAKE_CASE_ : List[Any] = cryptoMath.find_mod_inverse(SCREAMING_SNAKE_CASE_ , (p - 1) * (q - 1) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (n, e)
SCREAMING_SNAKE_CASE_ : List[Any] = (n, d)
return (public_key, private_key)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ):
print('\nWARNING:' )
print(
f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
'Use a different name or delete these files and re-run this program.' )
sys.exit()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = generate_key(SCREAMING_SNAKE_CASE_ )
print(f'\nWriting public key to file {name}_pubkey.txt...' )
with open(f'{name}_pubkey.txt' , 'w' ) as out_file:
out_file.write(f'{key_size},{public_key[0]},{public_key[1]}' )
print(f'Writing private key to file {name}_privkey.txt...' )
with open(f'{name}_privkey.txt' , 'w' ) as out_file:
out_file.write(f'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main()
| 345 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 0 |
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ : List[Any] = [0 for i in range(r + 1 )]
# nc0 = 1
__magic_name__ : Dict = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
__magic_name__ : List[str] = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5)) | 436 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 0 |
from collections.abc import Iterable
from typing import Any
class snake_case_ :
'''simple docstring'''
def __init__( self, A_ = None ) -> Union[str, Any]:
UpperCAmelCase__ =value
UpperCAmelCase__ =None # Added in order to delete a node easier
UpperCAmelCase__ =None
UpperCAmelCase__ =None
def __repr__( self ) -> Any:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)}, indent=1 )
class snake_case_ :
'''simple docstring'''
def __init__( self, A_ = None ) -> Optional[Any]:
UpperCAmelCase__ =root
def __str__( self ) -> Optional[int]:
return str(self.root )
def __UpperCAmelCase ( self, A_, A_ ) -> List[str]:
if new_children is not None: # reset its kids
UpperCAmelCase__ =node.parent
if node.parent is not None: # reset its parent
if self.is_right(_UpperCamelCase ): # If it is the right children
UpperCAmelCase__ =new_children
else:
UpperCAmelCase__ =new_children
else:
UpperCAmelCase__ =new_children
def __UpperCAmelCase ( self, A_ ) -> Union[str, Any]:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __UpperCAmelCase ( self ) -> List[Any]:
return self.root is None
def __UpperCAmelCase ( self, A_ ) -> Union[str, Any]:
UpperCAmelCase__ =Node(_UpperCamelCase ) # create a new Node
if self.empty(): # if Tree is empty
UpperCAmelCase__ =new_node # set its root
else: # Tree is not empty
UpperCAmelCase__ =self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
UpperCAmelCase__ =new_node # We insert the new node in a leaf
break
else:
UpperCAmelCase__ =parent_node.left
else:
if parent_node.right is None:
UpperCAmelCase__ =new_node
break
else:
UpperCAmelCase__ =parent_node.right
UpperCAmelCase__ =parent_node
def __UpperCAmelCase ( self, *A_ ) -> List[str]:
for value in values:
self.__insert(_UpperCamelCase )
def __UpperCAmelCase ( self, A_ ) -> int:
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
UpperCAmelCase__ =self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
UpperCAmelCase__ =node.left if value < node.value else node.right
return node
def __UpperCAmelCase ( self, A_ = None ) -> Optional[Any]:
if node is None:
if self.root is None:
return None
UpperCAmelCase__ =self.root
if not self.empty():
while node.right is not None:
UpperCAmelCase__ =node.right
return node
def __UpperCAmelCase ( self, A_ = None ) -> List[Any]:
if node is None:
UpperCAmelCase__ =self.root
if self.root is None:
return None
if not self.empty():
UpperCAmelCase__ =self.root
while node.left is not None:
UpperCAmelCase__ =node.left
return node
def __UpperCAmelCase ( self, A_ ) -> Optional[Any]:
UpperCAmelCase__ =self.search(_UpperCamelCase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_UpperCamelCase, _UpperCamelCase )
elif node.left is None: # Has only right children
self.__reassign_nodes(_UpperCamelCase, node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_UpperCamelCase, node.left )
else:
UpperCAmelCase__ =self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
UpperCAmelCase__ =(
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def __UpperCAmelCase ( self, A_ ) -> Union[str, Any]:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __UpperCAmelCase ( self, A_=None ) -> List[str]:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __UpperCAmelCase ( self, A_, A_ ) -> str:
if node:
self.inorder(_UpperCamelCase, node.left )
arr.append(node.value )
self.inorder(_UpperCamelCase, node.right )
def __UpperCAmelCase ( self, A_, A_ ) -> Tuple:
UpperCAmelCase__ =[]
self.inorder(_UpperCamelCase, _UpperCamelCase ) # append all values to list using inorder traversal
return arr[k - 1]
def _UpperCAmelCase ( A ):
'''simple docstring'''
UpperCAmelCase__ =[]
if curr_node is not None:
UpperCAmelCase__ =postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =(8, 3, 6, 1, 10, 14, 13, 4, 7)
UpperCAmelCase__ =BinarySearchTree()
for i in testlist:
t.insert(SCREAMING_SNAKE_CASE_ )
# Prints all the elements of the list in order traversal
print(SCREAMING_SNAKE_CASE_ )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn\'t exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn\'t exist" )
if not t.empty():
print("Max Value: " , t.get_max().value ) # type: ignore
print("Min Value: " , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 625 |
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__UpperCAmelCase : Dict = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a__ = field(
default=A__ ,metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a__ = field(
default='NER' ,metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
a__ = field(
default=A__ ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a__ = field(default=A__ ,metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
a__ = field(
default=A__ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a__ = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
a__ = field(
default=A__ ,metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} ,)
a__ = field(
default=1_28 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
a__ = field(
default=A__ ,metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def A ( ):
"""simple docstring"""
snake_case_ :Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case_ , snake_case_ , snake_case_ :Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case_ , snake_case_ , snake_case_ :Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
snake_case_ :Optional[Any] = import_module("tasks" )
try:
snake_case_ :str = getattr(SCREAMING_SNAKE_CASE_, model_args.task_type )
snake_case_ :int = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", SCREAMING_SNAKE_CASE_ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
snake_case_ :str = token_classification_task.get_labels(data_args.labels )
snake_case_ :Optional[Any] = dict(enumerate(SCREAMING_SNAKE_CASE_ ) )
snake_case_ :str = len(SCREAMING_SNAKE_CASE_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ :Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=SCREAMING_SNAKE_CASE_, idalabel=SCREAMING_SNAKE_CASE_, labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE_ )}, cache_dir=model_args.cache_dir, )
snake_case_ :Tuple = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
snake_case_ :List[Any] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE_, cache_dir=model_args.cache_dir, )
# Get datasets
snake_case_ :Dict = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE_, data_dir=data_args.data_dir, tokenizer=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
if training_args.do_train
else None
)
snake_case_ :Optional[Any] = (
TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE_, data_dir=data_args.data_dir, tokenizer=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
if training_args.do_eval
else None
)
def align_predictions(_A, _A ) -> Tuple[List[int], List[int]]:
snake_case_ :List[Any] = np.argmax(SCREAMING_SNAKE_CASE_, axis=2 )
snake_case_ , snake_case_ :Optional[Any] = preds.shape
snake_case_ :Union[str, Any] = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
snake_case_ :Tuple = [[] for _ in range(SCREAMING_SNAKE_CASE_ )]
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(SCREAMING_SNAKE_CASE_ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(_A ) -> Dict:
snake_case_ , snake_case_ :Tuple = align_predictions(p.predictions, p.label_ids )
return {
"accuracy_score": accuracy_score(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ),
"precision": precision_score(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ),
"recall": recall_score(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ),
"f1": fa_score(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ),
}
# Data collator
snake_case_ :int = DataCollatorWithPadding(SCREAMING_SNAKE_CASE_, pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case_ :Optional[Any] = Trainer(
model=SCREAMING_SNAKE_CASE_, args=SCREAMING_SNAKE_CASE_, train_dataset=SCREAMING_SNAKE_CASE_, eval_dataset=SCREAMING_SNAKE_CASE_, compute_metrics=SCREAMING_SNAKE_CASE_, data_collator=SCREAMING_SNAKE_CASE_, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ :List[str] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case_ :str = trainer.evaluate()
snake_case_ :Optional[int] = os.path.join(training_args.output_dir, "eval_results.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE_, "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s", SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
writer.write("%s = %s\n" % (key, value) )
results.update(SCREAMING_SNAKE_CASE_ )
# Predict
if training_args.do_predict:
snake_case_ :Union[str, Any] = TokenClassificationDataset(
token_classification_task=SCREAMING_SNAKE_CASE_, data_dir=data_args.data_dir, tokenizer=SCREAMING_SNAKE_CASE_, labels=SCREAMING_SNAKE_CASE_, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
snake_case_ , snake_case_ , snake_case_ :Union[str, Any] = trainer.predict(SCREAMING_SNAKE_CASE_ )
snake_case_ , snake_case_ :Any = align_predictions(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
snake_case_ :Optional[int] = os.path.join(training_args.output_dir, "test_results.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE_, "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s", SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
snake_case_ :Optional[Any] = os.path.join(training_args.output_dir, "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(SCREAMING_SNAKE_CASE_, "w" ) as writer:
with open(os.path.join(data_args.data_dir, "test.txt" ), "r" ) as f:
token_classification_task.write_predictions_to_file(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
return results
def A ( _A ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 584 |
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 662 |
UpperCAmelCase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = '''Morse code here!'''
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 | 0 |
import socket
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
UpperCAmelCase = socket.gethostname()
UpperCAmelCase = 1_2312
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
UpperCAmelCase = sock.recv(1024 )
if not data:
break
out_file.write(SCREAMING_SNAKE_CASE_ )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 130 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
UpperCAmelCase = get_activation("""swish""" )
self.assertIsInstance(_UpperCamelCase ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _UpperCamelCase ( self ):
UpperCAmelCase = get_activation("""silu""" )
self.assertIsInstance(_UpperCamelCase ,nn.SiLU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _UpperCamelCase ( self ):
UpperCAmelCase = get_activation("""mish""" )
self.assertIsInstance(_UpperCamelCase ,nn.Mish )
self.assertEqual(act(torch.tensor(-200 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
def _UpperCamelCase ( self ):
UpperCAmelCase = get_activation("""gelu""" )
self.assertIsInstance(_UpperCamelCase ,nn.GELU )
self.assertEqual(act(torch.tensor(-100 ,dtype=torch.floataa ) ).item() ,0 )
self.assertNotEqual(act(torch.tensor(-1 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(0 ,dtype=torch.floataa ) ).item() ,0 )
self.assertEqual(act(torch.tensor(20 ,dtype=torch.floataa ) ).item() ,20 )
| 341 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class __magic_name__ ( A__ , A__ ):
UpperCamelCase__ = """focalnet"""
def __init__( self , snake_case_=2_24 , snake_case_=4 , snake_case_=3 , snake_case_=96 , snake_case_=False , snake_case_=[1_92, 3_84, 7_68, 7_68] , snake_case_=[2, 2, 6, 2] , snake_case_=[2, 2, 2, 2] , snake_case_=[3, 3, 3, 3] , snake_case_="gelu" , snake_case_=4.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=False , snake_case_=1E-4 , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=0.02 , snake_case_=1E-5 , snake_case_=32 , snake_case_=None , snake_case_=None , **snake_case_ , ):
super().__init__(**_UpperCamelCase )
lowercase =image_size
lowercase =patch_size
lowercase =num_channels
lowercase =embed_dim
lowercase =use_conv_embed
lowercase =hidden_sizes
lowercase =depths
lowercase =focal_levels
lowercase =focal_windows
lowercase =hidden_act
lowercase =mlp_ratio
lowercase =hidden_dropout_prob
lowercase =drop_path_rate
lowercase =use_layerscale
lowercase =layerscale_value
lowercase =use_post_layernorm
lowercase =use_post_layernorm_in_modulation
lowercase =normalize_modulator
lowercase =initializer_range
lowercase =layer_norm_eps
lowercase =encoder_stride
lowercase =['''stem'''] + [f'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )]
lowercase , lowercase =get_aligned_output_features_output_indices(
out_features=_UpperCamelCase , out_indices=_UpperCamelCase , stage_names=self.stage_names )
| 72 |
import baseaa
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 | 0 |
from __future__ import annotations
def _lowerCamelCase ( snake_case ):
if not nums:
return 0
_lowerCAmelCase = nums[0]
_lowerCAmelCase = 0
for num in nums[1:]:
_lowerCAmelCase , _lowerCAmelCase = (
max_excluding + num,
max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
)
return max(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 192 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def _UpperCamelCase ( __UpperCamelCase = 1_50_00_00 ) -> int:
lowerCamelCase_ = defaultdict(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 ,SCREAMING_SNAKE_CASE_ ,2 ):
if gcd(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) > 1:
continue
lowerCamelCase_ = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(SCREAMING_SNAKE_CASE_ ,limit + 1 ,SCREAMING_SNAKE_CASE_ ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 42 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 0 |
def __lowercase ( __lowerCAmelCase : int ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError('only integers accepted as input' )
else:
a__ = str(abs(SCREAMING_SNAKE_CASE_ ) )
a__ = [list(SCREAMING_SNAKE_CASE_ ) for char in range(len(SCREAMING_SNAKE_CASE_ ) )]
for index in range(len(SCREAMING_SNAKE_CASE_ ) ):
num_transpositions[index].pop(SCREAMING_SNAKE_CASE_ )
return max(
int(''.join(list(SCREAMING_SNAKE_CASE_ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class snake_case_ :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=64 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=10 , __lowerCAmelCase=0.02 , __lowerCAmelCase=[1, 16, 4, 4] , __lowerCAmelCase=None , ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = image_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE_ : str = num_channels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE_ : Optional[Any] = use_labels
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE_ : Any = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = scope
SCREAMING_SNAKE_CASE_ : Any = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
SCREAMING_SNAKE_CASE_ : List[Any] = (self.image_size // 32) ** 2
SCREAMING_SNAKE_CASE_ : List[Any] = num_patches + 1
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : str = self.get_config()
return config, pixel_values, labels
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_UpperCamelCase , )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = ViTHybridModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ViTHybridForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( A__ , A__ , unittest.TestCase ):
__lowerCamelCase : Tuple = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__lowerCamelCase : Tuple = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : int = False
def __A ( self ):
SCREAMING_SNAKE_CASE_ : str = ViTHybridModelTester(self )
SCREAMING_SNAKE_CASE_ : List[Any] = ConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __A ( self ):
pass
def __A ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase , nn.Linear ) )
def __A ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __A ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _config_zero_init(_UpperCamelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(config=_UpperCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
SCREAMING_SNAKE_CASE_ : List[str] = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __A ( self ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : int = ViTHybridModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
@cached_property
def __A ( self ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE_ : int = image_processor(images=_UpperCamelCase , return_tensors='pt' ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
@require_accelerate
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
SCREAMING_SNAKE_CASE_ : int = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : Tuple = image_processor(images=_UpperCamelCase , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ : int = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 345 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
__magic_name__ : Union[str, Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
__magic_name__ : Any = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
__magic_name__ : int = model(_UpperCamelCase )["last_hidden_state"]
__magic_name__ : Tuple = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _UpperCamelCase )
# compare the actual values for a slice.
__magic_name__ : str = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) ) | 436 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
UpperCamelCase_ = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
UpperCamelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
UpperCamelCase_ = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
UpperCamelCase_ = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
UpperCamelCase_ = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCamelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModel)
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCamelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class snake_case_ ( _BaseAutoModelClass ):
'''simple docstring'''
__UpperCamelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCamelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 625 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 0 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A ( _A, _A ):
"""simple docstring"""
snake_case_ :Optional[int] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
snake_case_ :Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE_, stream=SCREAMING_SNAKE_CASE_ ).raw ).convert("RGB" )
snake_case_ :int = transforms.Compose(
[
transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073), (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
snake_case_ :List[str] = transform(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE_ )
return image
def A ( _A ):
"""simple docstring"""
if "visual_encoder" in key:
snake_case_ :Dict = re.sub("visual_encoder*", "vision_model.encoder", SCREAMING_SNAKE_CASE_ )
if "blocks" in key:
snake_case_ :int = re.sub(R"blocks", "layers", SCREAMING_SNAKE_CASE_ )
if "attn" in key:
snake_case_ :List[Any] = re.sub(R"attn", "self_attn", SCREAMING_SNAKE_CASE_ )
if "norm1" in key:
snake_case_ :List[Any] = re.sub(R"norm1", "layer_norm1", SCREAMING_SNAKE_CASE_ )
if "norm2" in key:
snake_case_ :Dict = re.sub(R"norm2", "layer_norm2", SCREAMING_SNAKE_CASE_ )
if "encoder.norm" in key:
snake_case_ :List[Any] = re.sub(R"encoder.norm", "post_layernorm", SCREAMING_SNAKE_CASE_ )
if "encoder.patch_embed.proj" in key:
snake_case_ :List[str] = re.sub(R"encoder.patch_embed.proj", "embeddings.patch_embedding", SCREAMING_SNAKE_CASE_ )
if "encoder.pos_embed" in key:
snake_case_ :Optional[Any] = re.sub(R"encoder.pos_embed", "embeddings.position_embedding", SCREAMING_SNAKE_CASE_ )
if "encoder.cls_token" in key:
snake_case_ :Union[str, Any] = re.sub(R"encoder.cls_token", "embeddings.class_embedding", SCREAMING_SNAKE_CASE_ )
if "self_attn" in key:
snake_case_ :Optional[int] = re.sub(R"self_attn.proj", "self_attn.projection", SCREAMING_SNAKE_CASE_ )
return key
@torch.no_grad()
def A ( _A, _A=None ):
"""simple docstring"""
if config_path is not None:
snake_case_ :Optional[Any] = BlipConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
snake_case_ :Any = BlipConfig(projection_dim=512, text_config={}, vision_config={} )
snake_case_ :List[Any] = BlipForConditionalGeneration(SCREAMING_SNAKE_CASE_ ).eval()
snake_case_ :int = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
snake_case_ :Union[str, Any] = blip_decoder(pretrained=SCREAMING_SNAKE_CASE_, image_size=384, vit="base" )
snake_case_ :List[Any] = pt_model.eval()
snake_case_ :Any = pt_model.state_dict()
for key in modified_state_dict.copy():
snake_case_ :List[str] = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ )
snake_case_ :Tuple = rename_key(SCREAMING_SNAKE_CASE_ )
snake_case_ :Dict = value
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
snake_case_ :Optional[Any] = 384
snake_case_ :str = load_demo_image(image_size=SCREAMING_SNAKE_CASE_, device="cpu" )
snake_case_ :List[Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
snake_case_ :Tuple = tokenizer(["a picture of"] ).input_ids
snake_case_ :Dict = hf_model.generate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
assert out[0].tolist() == [30_522, 1_037, 3_861, 1_997, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
snake_case_ :Optional[Any] = hf_model.generate(SCREAMING_SNAKE_CASE_ )
assert out[0].tolist() == [30_522, 1_037, 2_450, 3_564, 2_006, 1_996, 3_509, 2_007, 2_014, 3_899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
snake_case_ :Optional[Any] = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
snake_case_ :Optional[Any] = blip_vqa(pretrained=SCREAMING_SNAKE_CASE_, image_size=SCREAMING_SNAKE_CASE_, vit="base" )
vqa_model.eval()
snake_case_ :str = vqa_model.state_dict()
for key in modified_state_dict.copy():
snake_case_ :List[Any] = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ )
snake_case_ :Optional[int] = rename_key(SCREAMING_SNAKE_CASE_ )
snake_case_ :Dict = value
snake_case_ :List[str] = BlipForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
hf_vqa_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
snake_case_ :Dict = ["How many dogs are in this image?"]
snake_case_ :Dict = tokenizer(SCREAMING_SNAKE_CASE_, return_tensors="pt" ).input_ids
snake_case_ :List[str] = hf_vqa_model.generate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
snake_case_ :Optional[Any] = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
snake_case_ :List[Any] = blip_itm(pretrained=SCREAMING_SNAKE_CASE_, image_size=SCREAMING_SNAKE_CASE_, vit="base" )
itm_model.eval()
snake_case_ :List[str] = itm_model.state_dict()
for key in modified_state_dict.copy():
snake_case_ :List[Any] = modified_state_dict.pop(SCREAMING_SNAKE_CASE_ )
snake_case_ :Optional[Any] = rename_key(SCREAMING_SNAKE_CASE_ )
snake_case_ :Optional[int] = value
snake_case_ :Any = BlipForImageTextRetrieval(SCREAMING_SNAKE_CASE_ )
snake_case_ :Optional[Any] = ["A picture of a woman with a dog sitting in a beach"]
snake_case_ :str = tokenizer(
SCREAMING_SNAKE_CASE_, return_tensors="pt", padding="max_length", truncation=SCREAMING_SNAKE_CASE_, max_length=35, ).input_ids
hf_itm_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
hf_itm_model.eval()
snake_case_ :List[str] = hf_itm_model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, use_itm_head=SCREAMING_SNAKE_CASE_ )
snake_case_ :List[str] = hf_itm_model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, use_itm_head=SCREAMING_SNAKE_CASE_ )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0], dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 584 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
SCREAMING_SNAKE_CASE_:Optional[int] = logging.getLogger()
def __UpperCamelCase ( _lowerCAmelCase ) -> Any:
"""simple docstring"""
A : Optional[Any] = {}
A : List[str] = os.path.join(SCREAMING_SNAKE_CASE_ , """all_results.json""" )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
with open(SCREAMING_SNAKE_CASE_ , """r""" ) as f:
A : str = json.load(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(f'''can\'t find {path}''' )
return results
SCREAMING_SNAKE_CASE_:Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class SCREAMING_SNAKE_CASE__ ( A__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
import xla_spawn
A : Optional[int] = self.get_auto_remove_tmp_dir()
A : List[str] = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(_UpperCamelCase, """argv""", _UpperCamelCase ):
A : Any = time()
xla_spawn.main()
A : Tuple = time()
A : int = get_results(_UpperCamelCase )
self.assertGreaterEqual(result["""eval_accuracy"""], 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start, 500 )
def _lowerCAmelCase ( self ):
import xla_spawn
A : int = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(_UpperCamelCase, """argv""", _UpperCamelCase ):
xla_spawn.main()
| 662 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A : Tuple = logging.get_logger(__name__)
__A : Optional[Any] = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
__A : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCAmelCase = model_type_to_module_name(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE_ , '''__name__''' , SCREAMING_SNAKE_CASE_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCAmelCase = importlib.import_module('''transformers''' )
if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return None
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = get_file_from_repo(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as reader:
return json.load(SCREAMING_SNAKE_CASE_ )
class A_ :
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_UpperCamelCase )
def _lowercase ( cls , _A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.pop('''config''' , _UpperCamelCase )
UpperCAmelCase = kwargs.pop('''trust_remote_code''' , _UpperCamelCase )
UpperCAmelCase = True
UpperCAmelCase , UpperCAmelCase = ImageProcessingMixin.get_image_processor_dict(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase = config_dict.get('''image_processor_type''' , _UpperCamelCase )
UpperCAmelCase = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
UpperCAmelCase = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCAmelCase = config_dict.pop('''feature_extractor_type''' , _UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
UpperCAmelCase = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor''']
UpperCAmelCase = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# It could be in `config.image_processor_type``
UpperCAmelCase = getattr(_UpperCamelCase , '''image_processor_type''' , _UpperCamelCase )
if hasattr(_UpperCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
UpperCAmelCase = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
UpperCAmelCase = image_processor_class_from_name(_UpperCamelCase )
UpperCAmelCase = image_processor_auto_map is not None
UpperCAmelCase = image_processor_class is not None or type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
UpperCAmelCase = resolve_trust_remote_code(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if has_remote_code and trust_remote_code:
UpperCAmelCase = get_class_from_dynamic_module(
_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase = kwargs.pop('''code_revision''' , _UpperCamelCase )
if os.path.isdir(_UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
UpperCAmelCase = IMAGE_PROCESSOR_MAPPING[type(_UpperCamelCase )]
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def _lowercase ( _A , _A ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase )
| 130 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 0 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
UpperCAmelCase = 0
UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
UpperCAmelCase = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
UpperCAmelCase = 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total *= numbers[i]
UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
def _a ( _snake_case ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
UpperCAmelCase = 0
UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
UpperCAmelCase = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
UpperCAmelCase = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total += numbers[i]
UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCAmelCase : Optional[int] = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Dict = ['''YolosFeatureExtractor''']
_UpperCAmelCase : Dict = ['''YolosImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
def _lowerCamelCase ( snake_case = 1_000 ):
_lowerCAmelCase , _lowerCAmelCase = 1, 1
_lowerCAmelCase = 2
while True:
_lowerCAmelCase = 0
_lowerCAmelCase = fa + fa
_lowerCAmelCase , _lowerCAmelCase = fa, f
index += 1
for _ in str(SCREAMING_SNAKE_CASE_ ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 192 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 0 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase_ = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 42 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
from math import isqrt
def __lowercase ( __lowerCAmelCase : int ):
a__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
a__ = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE_ ) if is_prime[i]]
def __lowercase ( __lowerCAmelCase : int = 1_0**8 ):
a__ = calculate_prime_numbers(max_number // 2 )
a__ = 0
a__ = 0
a__ = len(SCREAMING_SNAKE_CASE_ ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class snake_case_ ( unittest.TestCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=4 , ):
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE_ : Any = seq_length
SCREAMING_SNAKE_CASE_ : Union[str, Any] = is_training
SCREAMING_SNAKE_CASE_ : Optional[int] = use_attention_mask
SCREAMING_SNAKE_CASE_ : Any = use_token_type_ids
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE_ : int = num_hidden_layers
SCREAMING_SNAKE_CASE_ : str = num_attention_heads
SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE_ : str = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : str = initializer_range
SCREAMING_SNAKE_CASE_ : str = num_choices
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : List[Any] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __A ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class snake_case_ ( A__ , unittest.TestCase ):
__lowerCamelCase : Union[str, Any] = True
__lowerCamelCase : str = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Dict = FlaxRobertaModelTester(self )
@slow
def __A ( self ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = model_class_name.from_pretrained('roberta-base' , from_pt=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCamelCase )
| 345 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 0 |
'''simple docstring'''
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = "T5Config"
class _snake_case ( A__ ):
'''simple docstring'''
__snake_case = """mt5"""
__snake_case = MTaConfig
class _snake_case ( A__ ):
'''simple docstring'''
__snake_case = """mt5"""
__snake_case = MTaConfig
class _snake_case ( A__ ):
'''simple docstring'''
__snake_case = """mt5"""
__snake_case = MTaConfig | 436 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 0 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
UpperCamelCase_ = 2_99_79_24_58
# Symbols
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = symbols('ct x y z')
def _UpperCAmelCase ( A ):
'''simple docstring'''
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def _UpperCAmelCase ( A ):
'''simple docstring'''
return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 )
def _UpperCAmelCase ( A ):
'''simple docstring'''
return np.array(
[
[gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0],
[-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def _UpperCAmelCase ( A , A = None ):
'''simple docstring'''
if event is None:
UpperCAmelCase__ =np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
UpperCamelCase_ = transform(29_97_92_45)
print('Example of four vector: ')
print(f"""ct\' = {four_vector[0]}""")
print(f"""x\' = {four_vector[1]}""")
print(f"""y\' = {four_vector[2]}""")
print(f"""z\' = {four_vector[3]}""")
# Substitute symbols with numerical values
UpperCamelCase_ = {ct: c, x: 1, y: 1, z: 1}
UpperCamelCase_ = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"""\n{numerical_vector}""")
| 625 |
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
class __lowerCAmelCase (A__ ):
'''simple docstring'''
a__ = ["""input_features"""]
def __init__( self , a=80 , a=1_60_00 , a=1_60 , a=30 , a=4_00 , a=0.0 , a=False , **a , ):
"""simple docstring"""
super().__init__(
feature_size=_UpperCamelCase , sampling_rate=_UpperCamelCase , padding_value=_UpperCamelCase , return_attention_mask=_UpperCamelCase , **_UpperCamelCase , )
snake_case_ :str = n_fft
snake_case_ :Dict = hop_length
snake_case_ :Union[str, Any] = chunk_length
snake_case_ :List[str] = chunk_length * sampling_rate
snake_case_ :int = self.n_samples // hop_length
snake_case_ :Any = sampling_rate
snake_case_ :Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_UpperCamelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=_UpperCamelCase , norm="slaney" , mel_scale="slaney" , )
def _a ( self , a ):
"""simple docstring"""
snake_case_ :Union[str, Any] = spectrogram(
_UpperCamelCase , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
snake_case_ :Any = log_spec[:, :-1]
snake_case_ :int = np.maximum(_UpperCamelCase , log_spec.max() - 8.0 )
snake_case_ :Any = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _a ( a , a , a = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
snake_case_ :Dict = np.array(_UpperCamelCase , np.intaa )
snake_case_ :Dict = []
for vector, length in zip(_UpperCamelCase , attention_mask.sum(-1 ) ):
snake_case_ :int = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
snake_case_ :Optional[Any] = padding_value
normed_input_values.append(_UpperCamelCase )
else:
snake_case_ :Optional[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , a , a = True , a = None , a = None , a = None , a = "max_length" , a = None , a = None , a = None , **a , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
snake_case_ :Optional[int] = isinstance(_UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
snake_case_ :Tuple = is_batched_numpy or (
isinstance(_UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case_ :Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_UpperCamelCase , np.ndarray ):
snake_case_ :Tuple = np.asarray(_UpperCamelCase , dtype=np.floataa )
elif isinstance(_UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case_ :Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case_ :Tuple = [np.asarray([raw_speech] ).T]
snake_case_ :List[Any] = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
snake_case_ :int = self.pad(
_UpperCamelCase , padding=_UpperCamelCase , max_length=max_length if max_length else self.n_samples , truncation=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
snake_case_ :Tuple = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
snake_case_ :Optional[Any] = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
snake_case_ :Union[str, Any] = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
snake_case_ :Dict = [self._np_extract_fbank_features(_UpperCamelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , _UpperCamelCase ):
snake_case_ :Optional[Any] = [np.asarray(_UpperCamelCase , dtype=np.floataa ) for feature in input_features]
else:
snake_case_ :Union[str, Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
snake_case_ :str = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
snake_case_ :Dict = padded_inputs.convert_to_tensors(_UpperCamelCase )
return padded_inputs
def _a ( self ):
"""simple docstring"""
snake_case_ :Any = copy.deepcopy(self.__dict__ )
snake_case_ :Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 584 |
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_:str = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
SCREAMING_SNAKE_CASE_:List[Any] = 250_004
SCREAMING_SNAKE_CASE_:str = 250_020
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = MBartaaTokenizer
__lowerCamelCase : Any = MBartaaTokenizerFast
__lowerCamelCase : Dict = True
__lowerCamelCase : Any = True
def _lowerCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
A : Dict = MBartaaTokenizer(_UpperCamelCase, src_lang="""en_XX""", tgt_lang="""ro_RO""", keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCAmelCase ( self ):
A : int = """<s>"""
A : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ), _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ), _UpperCamelCase )
def _lowerCAmelCase ( self ):
A : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<s>""" )
self.assertEqual(vocab_keys[1], """<pad>""" )
self.assertEqual(vocab_keys[-1], """<mask>""" )
self.assertEqual(len(_UpperCamelCase ), 1054 )
def _lowerCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size, 1054 )
def _lowerCAmelCase ( self ):
A : Optional[int] = MBartaaTokenizer(_UpperCamelCase, src_lang="""en_XX""", tgt_lang="""ro_RO""", keep_accents=_UpperCamelCase )
A : Optional[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_UpperCamelCase, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
A : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_UpperCamelCase, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""], )
A : Union[str, Any] = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
A : str = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase, [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""], )
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : Dict = {"""input_ids""": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase, model_name="""facebook/mbart-large-50""", revision="""d3913889c59cd5c9e456b269c376325eabad57e2""", )
def _lowerCAmelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A : List[str] = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase, **_UpperCamelCase )
A : List[Any] = self.tokenizer_class.from_pretrained(_UpperCamelCase, **_UpperCamelCase )
A : Dict = tempfile.mkdtemp()
A : str = tokenizer_r.save_pretrained(_UpperCamelCase )
A : Dict = tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
A : Optional[int] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_UpperCamelCase, _UpperCamelCase )
# Checks everything loads correctly in the same way
A : int = tokenizer_r.from_pretrained(_UpperCamelCase )
A : Any = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase, _UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=True
A : Dict = tempfile.mkdtemp()
A : Any = tokenizer_r.save_pretrained(_UpperCamelCase, legacy_format=_UpperCamelCase )
A : str = tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(_UpperCamelCase, _UpperCamelCase )
# Checks everything loads correctly in the same way
A : List[str] = tokenizer_r.from_pretrained(_UpperCamelCase )
A : Tuple = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase, _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=False
A : List[str] = tempfile.mkdtemp()
A : Any = tokenizer_r.save_pretrained(_UpperCamelCase, legacy_format=_UpperCamelCase )
A : Optional[Any] = tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] = tokenizer_r.from_pretrained(_UpperCamelCase )
A : Optional[int] = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase, _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = """facebook/mbart-large-50-one-to-many-mmt"""
__lowerCamelCase : Optional[int] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
__lowerCamelCase : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
__lowerCamelCase : Any = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def _lowerCAmelCase ( cls ):
A : str = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name, src_lang="""en_XX""", tgt_lang="""ro_RO""" )
A : int = 1
return cls
def _lowerCAmelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""], 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""], 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""], 25_0020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""], 25_0038 )
def _lowerCAmelCase ( self ):
A : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, _UpperCamelCase )
def _lowerCAmelCase ( self ):
self.assertIn(_UpperCamelCase, self.tokenizer.all_special_ids )
A : Optional[Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
A : str = self.tokenizer.decode(_UpperCamelCase, skip_special_tokens=_UpperCamelCase )
A : List[Any] = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=_UpperCamelCase )
self.assertEqual(_UpperCamelCase, _UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token, _UpperCamelCase )
def _lowerCAmelCase ( self ):
A : Optional[Any] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0], _UpperCamelCase )
A : Optional[int] = 10
A : str = self.tokenizer(_UpperCamelCase, max_length=_UpperCamelCase, truncation=_UpperCamelCase ).input_ids[0]
self.assertEqual(ids[0], _UpperCamelCase )
self.assertEqual(ids[-1], 2 )
self.assertEqual(len(_UpperCamelCase ), _UpperCamelCase )
def _lowerCAmelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ), [25_0053, 25_0001] )
def _lowerCAmelCase ( self ):
A : int = tempfile.mkdtemp()
A : List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCamelCase )
A : List[Any] = MBartaaTokenizer.from_pretrained(_UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, _UpperCamelCase )
@require_torch
def _lowerCAmelCase ( self ):
A : Union[str, Any] = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=_UpperCamelCase, return_tensors="""pt""" )
A : Dict = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _lowerCAmelCase ( self ):
A : int = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=_UpperCamelCase, truncation=_UpperCamelCase, max_length=len(self.expected_src_tokens ), return_tensors="""pt""", )
A : Optional[int] = shift_tokens_right(batch["""labels"""], self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCamelCase, _UpperCamelCase )
self.assertEqual((2, 14), batch.input_ids.shape )
self.assertEqual((2, 14), batch.attention_mask.shape )
A : int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, _UpperCamelCase )
self.assertEqual(2, batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def _lowerCAmelCase ( self ):
A : Tuple = self.tokenizer(self.src_text, padding=_UpperCamelCase, truncation=_UpperCamelCase, max_length=3, return_tensors="""pt""" )
A : List[Any] = self.tokenizer(
text_target=self.tgt_text, padding=_UpperCamelCase, truncation=_UpperCamelCase, max_length=10, return_tensors="""pt""" )
A : Dict = targets["""input_ids"""]
A : List[Any] = shift_tokens_right(_UpperCamelCase, self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def _lowerCAmelCase ( self ):
A : Tuple = self.tokenizer._build_translation_inputs(
"""A test""", return_tensors="""pt""", src_lang="""en_XX""", tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(_UpperCamelCase ), {
# en_XX, A, test, EOS
"""input_ids""": [[25_0004, 62, 3034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_0001,
}, )
| 662 |
UpperCAmelCase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = '''Morse code here!'''
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 | 0 |
from math import isqrt, loga
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list[int]:
'''simple docstring'''
UpperCAmelCase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE_ ) if is_prime[i]]
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ = 80_0800 , UpperCamelCase__ = 80_0800 ) -> int:
'''simple docstring'''
UpperCAmelCase = degree * loga(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = int(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = calculate_prime_numbers(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(F'{solution() = }')
| 130 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_UpperCamelCase = logging.getLogger(__name__)
class lowerCamelCase__ ( A__ ):
SCREAMING_SNAKE_CASE = """summarization"""
SCREAMING_SNAKE_CASE = ["""loss"""]
SCREAMING_SNAKE_CASE = ROUGE_KEYS
SCREAMING_SNAKE_CASE = """rouge2"""
def __init__( self ,A ,**A ):
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCAmelCase = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(_UpperCamelCase ,num_labels=_UpperCamelCase ,mode=self.mode ,**_UpperCamelCase )
use_task_specific_params(self.model ,"""summarization""" )
save_git_info(self.hparams.output_dir )
UpperCAmelCase = Path(self.output_dir ) / """metrics.json"""
UpperCAmelCase = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams ,self.hparams_save_path )
UpperCAmelCase = 0
UpperCAmelCase = defaultdict(_UpperCamelCase )
UpperCAmelCase = self.config.model_type
UpperCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
UpperCAmelCase = {
"""data_dir""": self.hparams.data_dir,
"""max_source_length""": self.hparams.max_source_length,
"""prefix""": self.model.config.prefix or """""",
}
UpperCAmelCase = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
UpperCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCAmelCase = get_git_info()["""repo_sha"""]
UpperCAmelCase = hparams.num_workers
UpperCAmelCase = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,_UpperCamelCase ):
UpperCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCAmelCase = self.decoder_start_token_id
UpperCAmelCase = (
SeqaSeqDataset if hasattr(self.tokenizer ,"""prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
UpperCAmelCase = False
UpperCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase = self.hparams.eval_max_gen_length
else:
UpperCAmelCase = self.model.config.max_length
UpperCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(_UpperCamelCase ,Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / """tok_batch.json""" )
UpperCAmelCase = True
return readable_batch
def _UpperCamelCase ( self ,A ,**A ):
return self.model(_UpperCamelCase ,**_UpperCamelCase )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = self.tokenizer.batch_decode(
_UpperCamelCase ,skip_special_tokens=_UpperCamelCase ,clean_up_tokenization_spaces=_UpperCamelCase )
return lmap(str.strip ,_UpperCamelCase )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = self.tokenizer.pad_token_id
UpperCAmelCase , UpperCAmelCase = batch["""input_ids"""], batch["""attention_mask"""]
UpperCAmelCase = batch["""labels"""]
if isinstance(self.model ,_UpperCamelCase ):
UpperCAmelCase = self.model._shift_right(_UpperCamelCase )
else:
UpperCAmelCase = shift_tokens_right(_UpperCamelCase ,_UpperCamelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase = decoder_input_ids
self.save_readable_batch(_UpperCamelCase )
UpperCAmelCase = self(_UpperCamelCase ,attention_mask=_UpperCamelCase ,decoder_input_ids=_UpperCamelCase ,use_cache=_UpperCamelCase )
UpperCAmelCase = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase = nn.CrossEntropyLoss(ignore_index=_UpperCamelCase )
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) )
else:
UpperCAmelCase = nn.functional.log_softmax(_UpperCamelCase ,dim=-1 )
UpperCAmelCase , UpperCAmelCase = label_smoothed_nll_loss(
_UpperCamelCase ,_UpperCamelCase ,self.hparams.label_smoothing ,ignore_index=_UpperCamelCase )
return (loss,)
@property
def _UpperCamelCase ( self ):
return self.tokenizer.pad_token_id
def _UpperCamelCase ( self ,A ,A ):
UpperCAmelCase = self._step(_UpperCamelCase )
UpperCAmelCase = dict(zip(self.loss_names ,_UpperCamelCase ) )
# tokens per batch
UpperCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
UpperCAmelCase = batch["""input_ids"""].shape[0]
UpperCAmelCase = batch["""input_ids"""].eq(self.pad ).sum()
UpperCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _UpperCamelCase ( self ,A ,A ):
return self._generative_step(_UpperCamelCase )
def _UpperCamelCase ( self ,A ,A="val" ):
self.step_count += 1
UpperCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCAmelCase = losses["""loss"""]
UpperCAmelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
UpperCAmelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase = torch.tensor(_UpperCamelCase ).type_as(_UpperCamelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_UpperCamelCase )
UpperCAmelCase = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
UpperCAmelCase = self.step_count
self.metrics[prefix].append(_UpperCamelCase ) # callback writes this to self.metrics_save_path
UpperCAmelCase = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'''{prefix}_loss''': loss,
F'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _UpperCamelCase ( self ,A ,A ):
return calculate_rouge(_UpperCamelCase ,_UpperCamelCase )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase = self.model.generate(
batch["""input_ids"""] ,attention_mask=batch["""attention_mask"""] ,use_cache=_UpperCamelCase ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,)
UpperCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0]
UpperCAmelCase = self.ids_to_clean_text(_UpperCamelCase )
UpperCAmelCase = self.ids_to_clean_text(batch["""labels"""] )
UpperCAmelCase = self._step(_UpperCamelCase )
UpperCAmelCase = dict(zip(self.loss_names ,_UpperCamelCase ) )
UpperCAmelCase = self.calc_generative_metrics(_UpperCamelCase ,_UpperCamelCase )
UpperCAmelCase = np.mean(lmap(_UpperCamelCase ,_UpperCamelCase ) )
base_metrics.update(gen_time=_UpperCamelCase ,gen_len=_UpperCamelCase ,preds=_UpperCamelCase ,target=_UpperCamelCase ,**_UpperCamelCase )
return base_metrics
def _UpperCamelCase ( self ,A ,A ):
return self._generative_step(_UpperCamelCase )
def _UpperCamelCase ( self ,A ):
return self.validation_epoch_end(_UpperCamelCase ,prefix="""test""" )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = self.n_obs[type_path]
UpperCAmelCase = self.target_lens[type_path]
UpperCAmelCase = self.dataset_class(
self.tokenizer ,type_path=_UpperCamelCase ,n_obs=_UpperCamelCase ,max_target_length=_UpperCamelCase ,**self.dataset_kwargs ,)
return dataset
def _UpperCamelCase ( self ,A ,A ,A = False ):
UpperCAmelCase = self.get_dataset(_UpperCamelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase = dataset.make_sortish_sampler(_UpperCamelCase ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCamelCase ,batch_size=_UpperCamelCase ,collate_fn=dataset.collate_fn ,shuffle=_UpperCamelCase ,num_workers=self.num_workers ,sampler=_UpperCamelCase ,)
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 )
return DataLoader(
_UpperCamelCase ,batch_sampler=_UpperCamelCase ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers ,)
else:
return DataLoader(
_UpperCamelCase ,batch_size=_UpperCamelCase ,collate_fn=dataset.collate_fn ,shuffle=_UpperCamelCase ,num_workers=self.num_workers ,sampler=_UpperCamelCase ,)
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_dataloader("""train""" ,batch_size=self.hparams.train_batch_size ,shuffle=_UpperCamelCase )
return dataloader
def _UpperCamelCase ( self ):
return self.get_dataloader("""val""" ,batch_size=self.hparams.eval_batch_size )
def _UpperCamelCase ( self ):
return self.get_dataloader("""test""" ,batch_size=self.hparams.eval_batch_size )
@staticmethod
def _UpperCamelCase ( A ,A ):
BaseTransformer.add_model_specific_args(_UpperCamelCase ,_UpperCamelCase )
add_generic_args(_UpperCamelCase ,_UpperCamelCase )
parser.add_argument(
"""--max_source_length""" ,default=1_024 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--max_target_length""" ,default=56 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--val_max_target_length""" ,default=142 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument(
"""--test_max_target_length""" ,default=142 ,type=_UpperCamelCase ,help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) ,)
parser.add_argument("""--freeze_encoder""" ,action="""store_true""" )
parser.add_argument("""--freeze_embeds""" ,action="""store_true""" )
parser.add_argument("""--sortish_sampler""" ,action="""store_true""" ,default=_UpperCamelCase )
parser.add_argument("""--overwrite_output_dir""" ,action="""store_true""" ,default=_UpperCamelCase )
parser.add_argument("""--max_tokens_per_batch""" ,type=_UpperCamelCase ,default=_UpperCamelCase )
parser.add_argument("""--logger_name""" ,type=_UpperCamelCase ,choices=["""default""", """wandb""", """wandb_shared"""] ,default="""default""" )
parser.add_argument("""--n_train""" ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" ,type=_UpperCamelCase ,default=500 ,required=_UpperCamelCase ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" ,type=_UpperCamelCase ,default="""summarization""" ,required=_UpperCamelCase ,help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" ,type=_UpperCamelCase ,default=0.0 ,required=_UpperCamelCase )
parser.add_argument("""--src_lang""" ,type=_UpperCamelCase ,default="""""" ,required=_UpperCamelCase )
parser.add_argument("""--tgt_lang""" ,type=_UpperCamelCase ,default="""""" ,required=_UpperCamelCase )
parser.add_argument("""--eval_beams""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,required=_UpperCamelCase )
parser.add_argument(
"""--val_metric""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,required=_UpperCamelCase ,choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" ,type=_UpperCamelCase ,default=_UpperCamelCase ,help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" ,type=_UpperCamelCase ,default=1 ,required=_UpperCamelCase ,help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" ,type=_UpperCamelCase ,default=-1 ,required=_UpperCamelCase ,help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) ,)
return parser
class lowerCamelCase__ ( A__ ):
SCREAMING_SNAKE_CASE = """translation"""
SCREAMING_SNAKE_CASE = ["""loss"""]
SCREAMING_SNAKE_CASE = ["""bleu"""]
SCREAMING_SNAKE_CASE = """bleu"""
def __init__( self ,A ,**A ):
super().__init__(_UpperCamelCase ,**_UpperCamelCase )
UpperCAmelCase = hparams.src_lang
UpperCAmelCase = hparams.tgt_lang
def _UpperCamelCase ( self ,A ,A ):
return calculate_bleu(_UpperCamelCase ,_UpperCamelCase )
def _a ( _snake_case , _snake_case=None ):
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
check_output_dir(SCREAMING_SNAKE_CASE_ , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCAmelCase = SummarizationModule(SCREAMING_SNAKE_CASE_ )
else:
UpperCAmelCase = TranslationModule(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
UpperCAmelCase = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase = os.environ.get("""WANDB_PROJECT""" , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
UpperCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCAmelCase = False
UpperCAmelCase = args.val_metric == """loss"""
UpperCAmelCase = generic_train(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE_ ) , early_stopping_callback=SCREAMING_SNAKE_CASE_ , logger=SCREAMING_SNAKE_CASE_ , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
UpperCAmelCase = """"""
UpperCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=SCREAMING_SNAKE_CASE_ ) )
if checkpoints:
UpperCAmelCase = checkpoints[-1]
UpperCAmelCase = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
_UpperCamelCase = pl.Trainer.add_argparse_args(parser)
_UpperCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_UpperCamelCase = parser.parse_args()
main(args)
| 341 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 0 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class __magic_name__ ( datasets.BeamBasedBuilder ):
def _A( self ):
return datasets.DatasetInfo(
features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_UpperCamelCase , )
def _A( self , snake_case_ , snake_case_ ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]
def _A( self , snake_case_ , snake_case_ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_UpperCamelCase )
class __magic_name__ ( datasets.BeamBasedBuilder ):
def _A( self ):
return datasets.DatasetInfo(
features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_UpperCamelCase , )
def _A( self , snake_case_ , snake_case_ ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
]
def _A( self , snake_case_ , snake_case_ ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(_UpperCamelCase )
def UpperCamelCase ( ) -> Dict:
'''simple docstring'''
return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def UpperCamelCase ( ) -> Dict:
'''simple docstring'''
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class __magic_name__ ( A__ ):
@require_beam
def _A( self ):
lowercase =len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase =DummyBeamDataset(cache_dir=_UpperCamelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
lowercase =builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _UpperCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _UpperCamelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def _A( self ):
import apache_beam as beam
lowercase =beam.io.parquetio.WriteToParquet
lowercase =len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase =DummyBeamDataset(cache_dir=_UpperCamelCase , beam_runner='''DirectRunner''' )
with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
lowercase =partial(_UpperCamelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train-00000-of-00002.arrow' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
lowercase =builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _UpperCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _UpperCamelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
self.assertTrue(
os.path.exists(os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
@require_beam
def _A( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase =DummyBeamDataset(cache_dir=_UpperCamelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def _A( self ):
lowercase =len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
lowercase =NestedBeamDataset(cache_dir=_UpperCamelCase , beam_runner='''DirectRunner''' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , f'{builder.name}-train.arrow' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
lowercase =builder.as_dataset()
self.assertEqual(dset['''train'''].num_rows , _UpperCamelCase )
self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _UpperCamelCase )
self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(_UpperCamelCase , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
del dset
| 72 |
import baseaa
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 | 0 |
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_lowercase: Optional[Any] = logging.get_logger(__name__)
class lowerCamelCase__ ( A__ ):
def __init__( self : Dict , *lowercase__ : List[Any] , **lowercase__ : List[str] ):
warnings.warn(
'The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use LayoutLMv2ImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 192 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
A_ = False
A_ = True
A_ = False
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
A_ = parser.parse_args()
A_ = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
A_ = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
A_ = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
A_ = reader.read()
A_ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
A_ = UNetaDModel(**config)
else:
A_ = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
A_ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
A_ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
A_ = config[key]
del config[key]
A_ = [k.replace("UNetRes", "") for k in config["down_block_types"]]
A_ = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
A_ = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
A_ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
A_ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
A_ = param_value
A_ = True
if not has_changed:
A_ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 42 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 0 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case_ :
def __init__( self :List[str] ,__snake_case :Optional[Any] ,__snake_case :List[Any]=13 ,__snake_case :Any=7 ,__snake_case :int=True ,__snake_case :List[str]=True ,__snake_case :Optional[int]=True ,__snake_case :Union[str, Any]=True ,__snake_case :Union[str, Any]=99 ,__snake_case :Any=32 ,__snake_case :int=5 ,__snake_case :Union[str, Any]=4 ,__snake_case :Tuple=37 ,__snake_case :Union[str, Any]="gelu" ,__snake_case :Optional[Any]=0.1 ,__snake_case :Optional[Any]=0.1 ,__snake_case :int=5_12 ,__snake_case :str=16 ,__snake_case :List[Any]=2 ,__snake_case :Any=0.02 ,__snake_case :Optional[int]=3 ,__snake_case :List[str]=4 ,__snake_case :List[str]=None ,) -> Dict:
a__ = parent
a__ = batch_size
a__ = seq_length
a__ = is_training
a__ = use_input_mask
a__ = use_token_type_ids
a__ = use_labels
a__ = vocab_size
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = max_position_embeddings
a__ = type_vocab_size
a__ = type_sequence_label_size
a__ = initializer_range
a__ = num_labels
a__ = num_choices
a__ = scope
def lowerCamelCase__( self :str ) -> Union[str, Any]:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
a__ = None
if self.use_input_mask:
a__ = random_attention_mask([self.batch_size, self.seq_length] )
a__ = None
if self.use_token_type_ids:
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
a__ = None
a__ = None
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
a__ = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
a__ = ids_tensor([self.batch_size] ,self.num_choices )
a__ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__( self :Optional[int] ) -> List[str]:
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_UpperCamelCase ,initializer_range=self.initializer_range ,)
def lowerCamelCase__( self :str ,__snake_case :Tuple ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :int ,__snake_case :Optional[Any] ,__snake_case :List[str] ,__snake_case :int ) -> Dict:
a__ = NystromformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
a__ = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,token_type_ids=_UpperCamelCase )
a__ = model(_UpperCamelCase ,token_type_ids=_UpperCamelCase )
a__ = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__( self :List[str] ,__snake_case :int ,__snake_case :Tuple ,__snake_case :List[str] ,__snake_case :Dict ,__snake_case :Optional[Any] ,__snake_case :Dict ,__snake_case :Dict ) -> Tuple:
a__ = NystromformerForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
a__ = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,token_type_ids=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__( self :Dict ,__snake_case :str ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :Any ,__snake_case :Optional[int] ,__snake_case :Union[str, Any] ,__snake_case :int ) -> Tuple:
a__ = NystromformerForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
a__ = model(
_UpperCamelCase ,attention_mask=_UpperCamelCase ,token_type_ids=_UpperCamelCase ,start_positions=_UpperCamelCase ,end_positions=_UpperCamelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :List[Any] ,__snake_case :List[str] ,__snake_case :Dict ,__snake_case :str ,__snake_case :Optional[int] ,__snake_case :Any ) -> Optional[Any]:
a__ = self.num_labels
a__ = NystromformerForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
a__ = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,token_type_ids=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def lowerCamelCase__( self :List[Any] ,__snake_case :Optional[Any] ,__snake_case :int ,__snake_case :Any ,__snake_case :Union[str, Any] ,__snake_case :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :List[Any] ) -> List[Any]:
a__ = self.num_labels
a__ = NystromformerForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
a__ = model(_UpperCamelCase ,attention_mask=_UpperCamelCase ,token_type_ids=_UpperCamelCase ,labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__( self :Union[str, Any] ,__snake_case :Optional[int] ,__snake_case :str ,__snake_case :Optional[int] ,__snake_case :int ,__snake_case :Dict ,__snake_case :Dict ,__snake_case :Optional[Any] ) -> Optional[int]:
a__ = self.num_choices
a__ = NystromformerForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
a__ = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
a__ = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
a__ = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
a__ = model(
_UpperCamelCase ,attention_mask=_UpperCamelCase ,token_type_ids=_UpperCamelCase ,labels=_UpperCamelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def lowerCamelCase__( self :Optional[Any] ) -> Optional[int]:
a__ = self.prepare_config_and_inputs()
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) = config_and_inputs
a__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class snake_case_ (A__ , A__ , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[Any] = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCamelCase__( self :Optional[Any] ) -> Any:
a__ = NystromformerModelTester(self )
a__ = ConfigTester(self ,config_class=_UpperCamelCase ,hidden_size=37 )
def lowerCamelCase__( self :Tuple ) -> Dict:
self.config_tester.run_common_tests()
def lowerCamelCase__( self :List[Any] ) -> Optional[int]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def lowerCamelCase__( self :List[str] ) -> int:
a__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def lowerCamelCase__( self :Optional[int] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def lowerCamelCase__( self :Any ) -> Tuple:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def lowerCamelCase__( self :Optional[Any] ) -> Dict:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def lowerCamelCase__( self :List[str] ) -> Optional[Any]:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def lowerCamelCase__( self :Union[str, Any] ) -> str:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = NystromformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
class snake_case_ (unittest.TestCase ):
@slow
def lowerCamelCase__( self :str ) -> Optional[int]:
a__ = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
a__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
a__ = model(_UpperCamelCase )[0]
a__ = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape ,_UpperCamelCase )
a__ = torch.tensor(
[[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_UpperCamelCase ,atol=1E-4 ) )
@slow
def lowerCamelCase__( self :int ) -> Optional[Any]:
a__ = 'the [MASK] of Belgium is Brussels'
a__ = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
a__ = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
a__ = tokenizer(_UpperCamelCase ,return_tensors='pt' )
with torch.no_grad():
a__ = model(encoding.input_ids ).logits
a__ = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(_UpperCamelCase ) ,'capital' )
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class snake_case_ ( unittest.TestCase ):
def __init__( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : int = parent
def __A ( self ):
return {}
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : List[str] = '<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>'
SCREAMING_SNAKE_CASE_ : Tuple = '\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n '
return [html_string_a, html_string_a]
@require_bsa
class snake_case_ ( A__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None
def __A ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = MarkupLMFeatureExtractionTester(self )
@property
def __A ( self ):
return self.feature_extract_tester.prepare_feat_extract_dict()
def __A ( self ):
# Initialize feature_extractor
SCREAMING_SNAKE_CASE_ : List[str] = self.feature_extraction_class()
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[Any] = get_html_strings()[0]
SCREAMING_SNAKE_CASE_ : Any = feature_extractor(_UpperCamelCase )
# fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
SCREAMING_SNAKE_CASE_ : List[str] = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
self.assertEqual(encoding.nodes , _UpperCamelCase )
self.assertEqual(encoding.xpaths , _UpperCamelCase )
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = get_html_strings()
SCREAMING_SNAKE_CASE_ : Optional[Any] = feature_extractor(_UpperCamelCase )
# fmt: off
SCREAMING_SNAKE_CASE_ : Tuple = expected_nodes + [['My First Heading', 'My first paragraph.']]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = expected_xpaths + [['/html/body/h1', '/html/body/p']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
self.assertEqual(encoding.nodes , _UpperCamelCase )
self.assertEqual(encoding.xpaths , _UpperCamelCase )
| 345 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(SCREAMING_SNAKE_CASE_ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod() | 436 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 625 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 0 |
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def A ( ):
"""simple docstring"""
snake_case_ , snake_case_ :str = 9, 14 # noqa: F841
snake_case_ :Any = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
snake_case_ :Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE_ )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
snake_case_ :Dict = mst(SCREAMING_SNAKE_CASE_ )
snake_case_ :List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
snake_case_ :Optional[int] = tuple(answer[:2] )
snake_case_ :Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result
| 584 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
SCREAMING_SNAKE_CASE_:Optional[int] = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE_:Any = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE_:Optional[int] = {
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> str:
"""simple docstring"""
assert len(str(SCREAMING_SNAKE_CASE_ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
A : str = year // 100
A : List[Any] = (5 * (century % 4) + 2) % 7
A : Tuple = year % 100
A : List[str] = centurian % 12
A : Tuple = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
A : List[str] = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
A : Optional[int] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 662 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class A_ (A__ ):
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
UpperCAmelCase = 5
# Realm tok
UpperCAmelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
UpperCAmelCase = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCAmelCase = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
def _lowercase ( self ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def _lowercase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = RealmConfig(num_block_records=self.num_block_records )
return config
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=_UpperCamelCase , )
return block_records
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_config()
UpperCAmelCase = self.get_dummy_retriever()
UpperCAmelCase = retriever.tokenizer
UpperCAmelCase = np.array([0, 3] , dtype='''long''' )
UpperCAmelCase = tokenizer(['''Test question'''] ).input_ids
UpperCAmelCase = tokenizer(
['''the fourth'''] , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ).input_ids
UpperCAmelCase = config.reader_seq_len
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever(
_UpperCamelCase , _UpperCamelCase , answer_ids=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' )
self.assertEqual(len(_UpperCamelCase ) , 2 )
self.assertEqual(len(_UpperCamelCase ) , 2 )
self.assertEqual(len(_UpperCamelCase ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_config()
UpperCAmelCase = self.get_dummy_retriever()
UpperCAmelCase = retriever.tokenizer
UpperCAmelCase = np.array([0, 3, 5] , dtype='''long''' )
UpperCAmelCase = tokenizer(['''Test question'''] ).input_ids
UpperCAmelCase = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ).input_ids
UpperCAmelCase = config.reader_seq_len
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = retriever(
_UpperCamelCase , _UpperCamelCase , answer_ids=_UpperCamelCase , max_length=_UpperCamelCase , return_tensors='''np''' )
self.assertEqual([False, True, True] , _UpperCamelCase )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _UpperCamelCase )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _UpperCamelCase )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
UpperCAmelCase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
UpperCAmelCase = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
UpperCAmelCase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 130 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 341 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
def UpperCamelCase ( lowercase_ : list ) -> int:
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
lowercase =grid[0]
for row_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
lowercase =grid[row_n]
lowercase =fill_row(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase =grid[row_n]
return grid[-1][-1]
def UpperCamelCase ( lowercase_ : list , lowercase_ : list ) -> list:
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 |
def A__ ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a == '''1''' and char_b == '''1''' ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _lowerCamelCase ( snake_case , snake_case , snake_case ):
if gpta_config_file == "":
_lowerCAmelCase = GPTaConfig()
else:
_lowerCAmelCase = GPTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = GPTaModel(SCREAMING_SNAKE_CASE_ )
# Load weights from numpy
load_tf_weights_in_gpta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
_lowerCAmelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
_lowerCAmelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
_lowercase: Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 192 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Dict = """falcon"""
__A : Any = ["""past_key_values"""]
def __init__( self , _UpperCamelCase=65024 , _UpperCamelCase=4544 , _UpperCamelCase=32 , _UpperCamelCase=71 , _UpperCamelCase=1e-5 , _UpperCamelCase=0.02 , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=11 , _UpperCamelCase=11 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase = kwargs.pop('''n_embed''' , _UpperCamelCase )
_UpperCAmelCase = hidden_size if n_embed is None else n_embed
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = use_cache
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase = alibi
_UpperCAmelCase = new_decoder_architecture
_UpperCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase = parallel_attn
_UpperCAmelCase = bias
super().__init__(bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase( self ):
return not self.alibi | 32 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class UpperCAmelCase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
SCREAMING_SNAKE_CASE_ = Features({'audio': Audio()} )
SCREAMING_SNAKE_CASE_ = Features({'transcription': Value('string' )} )
SCREAMING_SNAKE_CASE_ = "audio"
SCREAMING_SNAKE_CASE_ = "transcription"
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , _UpperCamelCase ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
lowerCamelCase_ = copy.deepcopy(self )
lowerCamelCase_ = self.input_schema.copy()
lowerCamelCase_ = features[self.audio_column]
lowerCamelCase_ = input_schema
return task_template
@property
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 42 |
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
snake_case : List[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def __lowercase ( __lowerCAmelCase : bytes ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
a__ = F'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(SCREAMING_SNAKE_CASE_ )
a__ = ''.join(bin(SCREAMING_SNAKE_CASE_ )[2:].zfill(8 ) for byte in data )
a__ = len(SCREAMING_SNAKE_CASE_ ) % 6 != 0
if padding_needed:
# The padding that will be added later
a__ = b'=' * ((6 - len(SCREAMING_SNAKE_CASE_ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE_ ) % 6)
else:
a__ = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 6 ) ).encode()
+ padding
)
def __lowercase ( __lowerCAmelCase : str ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
a__ = (
'argument should be a bytes-like object or ASCII string, '
F'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(SCREAMING_SNAKE_CASE_ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
try:
a__ = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
a__ = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE_ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
a__ = encoded_data[:-padding]
a__ = ''.join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
a__ = ''.join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE_ ) )[2:].zfill(6 ) for char in encoded_data )
a__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(SCREAMING_SNAKE_CASE_ ) , 8 )
]
return bytes(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 0:
return False
_UpperCAmelCase = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ) -> List[str]:
SCREAMING_SNAKE_CASE_ : List[Any] = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(example['content'] , truncation=SCREAMING_SNAKE_CASE_ )['input_ids']
SCREAMING_SNAKE_CASE_ : List[Any] = len(example['content'] ) / len(output['input_ids'] )
return output
lowerCAmelCase__: Optional[int] = HfArgumentParser(PretokenizationArguments)
lowerCAmelCase__: Any = parser.parse_args()
if args.num_workers is None:
lowerCAmelCase__: Any = multiprocessing.cpu_count()
lowerCAmelCase__: List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
lowerCAmelCase__: List[Any] = time.time()
lowerCAmelCase__: int = load_dataset(args.dataset_name, split="train")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
lowerCAmelCase__: Tuple = time.time()
lowerCAmelCase__: Union[str, Any] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
"repo_name",
"path",
"copies",
"size",
"content",
"license",
"hash",
"line_mean",
"line_max",
"alpha_frac",
"autogenerated",
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
lowerCAmelCase__: int = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 345 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __UpperCamelCase ( A__ ):
__A : str = field(default="""language-modeling""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
__A : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
__A : ClassVar[Features] = Features({} )
__A : str = "text"
@property
def UpperCamelCase( self ):
return {self.text_column: "text"} | 32 | 0 |
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
_SCREAMING_SNAKE_CASE : List[Any] = "1"
_SCREAMING_SNAKE_CASE : str = "0"
_SCREAMING_SNAKE_CASE : Tuple = "1"
_SCREAMING_SNAKE_CASE : Union[str, Any] = ort.SessionOptions()
_SCREAMING_SNAKE_CASE : Union[str, Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
_SCREAMING_SNAKE_CASE : str = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
_SCREAMING_SNAKE_CASE : Optional[int] = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
_SCREAMING_SNAKE_CASE : List[Any] = ort.RunOptions()
_SCREAMING_SNAKE_CASE : int = 1_28
_SCREAMING_SNAKE_CASE : int = 1
_SCREAMING_SNAKE_CASE : str = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : Optional[Any] = np.ones((batch, sequence), dtype=np.intaa)
_SCREAMING_SNAKE_CASE : List[Any] = np.ones((batch, sequence), dtype=np.intaa)
print("Warm up phase...")
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Start inference...")
_SCREAMING_SNAKE_CASE : int = time.time()
_SCREAMING_SNAKE_CASE : List[str] = 20_00
_SCREAMING_SNAKE_CASE : Optional[int] = {}
for iter in range(max_iters):
_SCREAMING_SNAKE_CASE : int = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 10_00 / max_iters)) | 436 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "spiece.model"}
UpperCAmelCase_ = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
}
}
# TODO(PVP) - this should be removed in Transformers v5
UpperCAmelCase_ = {
"t5-small": 5_12,
"t5-base": 5_12,
"t5-large": 5_12,
"t5-3b": 5_12,
"t5-11b": 5_12,
}
UpperCAmelCase_ = "▁"
class __UpperCamelCase ( A__ ):
__A : Any = VOCAB_FILES_NAMES
__A : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Tuple = ["""input_ids""", """attention_mask"""]
def __init__( self , _UpperCamelCase , _UpperCamelCase="</s>" , _UpperCamelCase="<unk>" , _UpperCamelCase="<pad>" , _UpperCamelCase=100 , _UpperCamelCase=None , _UpperCamelCase = None , _UpperCamelCase=True , **_UpperCamelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
_UpperCAmelCase = [f'''<extra_id_{i}>''' for i in range(_UpperCamelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_UpperCAmelCase = len(set(filter(lambda _UpperCamelCase : bool('''extra_id''' in str(_UpperCamelCase ) ) , _UpperCamelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
f'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
_UpperCAmelCase = legacy
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , extra_ids=_UpperCamelCase , additional_special_tokens=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , legacy=_UpperCamelCase , **_UpperCamelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = extra_ids
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@staticmethod
def UpperCamelCase( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
_UpperCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCamelCase , )
return max_model_length
@property
def UpperCamelCase( self ):
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCamelCase( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(_UpperCamelCase )) + [1]
return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
def UpperCamelCase( self ):
return list(
set(filter(lambda _UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) )
def UpperCamelCase( self ):
return [self._convert_token_to_id(_UpperCamelCase ) for token in self.get_sentinel_tokens()]
def UpperCamelCase( self , _UpperCamelCase ):
if len(_UpperCamelCase ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
if token_ids_a is None:
return token_ids_a
else:
_UpperCAmelCase = self._add_eos_if_not_present(_UpperCamelCase )
return token_ids_a + token_ids_a
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self , _UpperCamelCase ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
_UpperCAmelCase = SPIECE_UNDERLINE + text.replace(_UpperCamelCase , ''' ''' )
return super().tokenize(_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , **_UpperCamelCase ):
if not self.legacy:
_UpperCAmelCase = text.startswith(_UpperCamelCase )
if is_first:
_UpperCAmelCase = text[1:]
_UpperCAmelCase = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(_UpperCamelCase ):
_UpperCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCamelCase( self , _UpperCamelCase ):
if token.startswith('''<extra_id_''' ):
_UpperCAmelCase = re.match(R'''<extra_id_(\d+)>''' , _UpperCamelCase )
_UpperCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase ):
if index < self.sp_model.get_piece_size():
_UpperCAmelCase = self.sp_model.IdToPiece(_UpperCamelCase )
else:
_UpperCAmelCase = f'''<extra_id_{self.vocab_size - 1 - index}>'''
return token
def UpperCamelCase( self , _UpperCamelCase ):
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
_UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
_UpperCAmelCase = True
_UpperCAmelCase = []
else:
current_sub_tokens.append(_UpperCamelCase )
_UpperCAmelCase = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None ):
if not os.path.isdir(_UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_UpperCAmelCase = os.path.join(
_UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,) | 32 | 0 |
UpperCamelCase_ = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '\"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCamelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def _UpperCAmelCase ( A ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def _UpperCAmelCase ( A ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ ="Morse code here!"
print(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ =encrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase__ =decrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 625 |
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
_UpperCAmelCase = str(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 9 and set(SCREAMING_SNAKE_CASE_ ) == set('''123456789''' )
def A__ ( ) -> int | None:
"""simple docstring"""
for base_num in range(99_99 , 49_99 , -1 ):
_UpperCAmelCase = 10_00_02 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
for base_num in range(3_33 , 99 , -1 ):
_UpperCAmelCase = 1_00_20_03 * base_num
if is_9_pandigital(SCREAMING_SNAKE_CASE_ ):
return candidate
return None
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
__UpperCAmelCase : Optional[Any] = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class __lowerCAmelCase (A__ ):
'''simple docstring'''
a__ = """funnel"""
a__ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
}
def __init__( self , a=3_05_22 , a=[4, 4, 4] , a=None , a=2 , a=7_68 , a=12 , a=64 , a=30_72 , a="gelu_new" , a=0.1 , a=0.1 , a=0.0 , a=0.1 , a=None , a=1e-9 , a="mean" , a="relative_shift" , a=True , a=True , a=True , **a , ):
"""simple docstring"""
snake_case_ :List[str] = vocab_size
snake_case_ :str = block_sizes
snake_case_ :Tuple = [1] * len(_UpperCamelCase ) if block_repeats is None else block_repeats
assert len(_UpperCamelCase ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
snake_case_ :Dict = num_decoder_layers
snake_case_ :Union[str, Any] = d_model
snake_case_ :Optional[Any] = n_head
snake_case_ :List[Any] = d_head
snake_case_ :List[Any] = d_inner
snake_case_ :List[str] = hidden_act
snake_case_ :Optional[Any] = hidden_dropout
snake_case_ :Optional[Any] = attention_dropout
snake_case_ :List[Any] = activation_dropout
snake_case_ :int = initializer_range
snake_case_ :str = initializer_std
snake_case_ :Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
snake_case_ :Dict = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
snake_case_ :str = attention_type
snake_case_ :int = separate_cls
snake_case_ :List[Any] = truncate_seq
snake_case_ :List[Any] = pool_q_only
super().__init__(**_UpperCamelCase )
@property
def _a ( self ):
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def _a ( self , a ):
"""simple docstring"""
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def _a ( self ):
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def _a ( self , a ):
"""simple docstring"""
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 584 |
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float ) -> np.ndarray:
"""simple docstring"""
return np.where(vector > 0 , SCREAMING_SNAKE_CASE_ , (alpha * (np.exp(SCREAMING_SNAKE_CASE_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 | 0 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class SCREAMING_SNAKE_CASE__ ( A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = BarthezTokenizer
__lowerCamelCase : Union[str, Any] = BarthezTokenizerFast
__lowerCamelCase : List[Any] = True
__lowerCamelCase : str = True
def _lowerCAmelCase ( self ):
super().setUp()
A : Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname, legacy_format=_UpperCamelCase )
A : List[str] = tokenizer
def _lowerCAmelCase ( self ):
A : str = """<pad>"""
A : Optional[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ), _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ), _UpperCamelCase )
def _lowerCAmelCase ( self ):
A : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], """<s>""" )
self.assertEqual(vocab_keys[1], """<pad>""" )
self.assertEqual(vocab_keys[-1], """<mask>""" )
self.assertEqual(len(_UpperCamelCase ), 10_1122 )
def _lowerCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size, 10_1122 )
@require_torch
def _lowerCAmelCase ( self ):
A : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
A : List[str] = [0, 57, 3018, 7_0307, 91, 2]
A : Tuple = self.tokenizer(
_UpperCamelCase, max_length=len(_UpperCamelCase ), padding=_UpperCamelCase, truncation=_UpperCamelCase, return_tensors="""pt""" )
self.assertIsInstance(_UpperCamelCase, _UpperCamelCase )
self.assertEqual((2, 6), batch.input_ids.shape )
self.assertEqual((2, 6), batch.attention_mask.shape )
A : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCamelCase, _UpperCamelCase )
def _lowerCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
A : Optional[Any] = self.get_tokenizer()
A : Union[str, Any] = self.get_rust_tokenizer()
A : Optional[int] = """I was born in 92000, and this is falsé."""
A : Optional[Any] = tokenizer.tokenize(_UpperCamelCase )
A : Any = rust_tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase, _UpperCamelCase )
A : List[Any] = tokenizer.encode(_UpperCamelCase, add_special_tokens=_UpperCamelCase )
A : int = rust_tokenizer.encode(_UpperCamelCase, add_special_tokens=_UpperCamelCase )
self.assertListEqual(_UpperCamelCase, _UpperCamelCase )
A : int = self.get_rust_tokenizer()
A : Optional[int] = tokenizer.encode(_UpperCamelCase )
A : Optional[int] = rust_tokenizer.encode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase, _UpperCamelCase )
@slow
def _lowerCAmelCase ( self ):
# fmt: off
A : List[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
A : Union[str, Any] = [
"""Le transformeur est un modèle d\'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase, model_name="""moussaKam/mbarthez""", revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""", sequences=_UpperCamelCase, )
| 662 |
UpperCAmelCase_ = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
UpperCAmelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> str:
"""simple docstring"""
return "".join(REVERSE_DICT[char] for char in message.split() )
def A__ ( ) -> None:
"""simple docstring"""
_UpperCAmelCase = '''Morse code here!'''
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = encrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = decrypt(SCREAMING_SNAKE_CASE_ )
print(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 32 | 0 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> list:
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase , UpperCAmelCase = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
UpperCAmelCase = result + left + right
return input_list
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> list:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return input_list
UpperCAmelCase = list(SCREAMING_SNAKE_CASE_ )
# iteration for two-way merging
UpperCAmelCase = 2
while p <= len(SCREAMING_SNAKE_CASE_ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase = i
UpperCAmelCase = i + p - 1
UpperCAmelCase = (low + high + 1) // 2
UpperCAmelCase = merge(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# final merge of last two parts
if p * 2 >= len(SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase = i
UpperCAmelCase = merge(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
__A : Tuple = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
__A : Dict = []
else:
__A : int = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
| 130 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Any = DanceDiffusionPipeline
__A : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__A : Tuple = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__A : Tuple = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__A : List[str] = False
__A : str = False
def UpperCamelCase( self ):
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCamelCase , use_timestep_embedding=_UpperCamelCase , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
_UpperCAmelCase = IPNDMScheduler()
_UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase=0 ):
if str(_UpperCamelCase ).startswith('''mps''' ):
_UpperCAmelCase = torch.manual_seed(_UpperCamelCase )
else:
_UpperCAmelCase = torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
_UpperCAmelCase = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCamelCase( self ):
_UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = DanceDiffusionPipeline(**_UpperCamelCase )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = self.get_dummy_inputs(_UpperCamelCase )
_UpperCAmelCase = pipe(**_UpperCamelCase )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCAmelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_local()
@skip_mps
def UpperCamelCase( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCamelCase( self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCamelCase( self ):
return super().test_attention_slicing_forward_pass()
def UpperCamelCase( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCamelCase( self ):
_UpperCAmelCase = torch_device
_UpperCAmelCase = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(generator=_UpperCamelCase , num_inference_steps=100 , audio_length_in_s=4.096 )
_UpperCAmelCase = output.audios
_UpperCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCAmelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2 | 32 | 0 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def _a ( _snake_case , _snake_case = 0.0 , _snake_case = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
UpperCAmelCase_ = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
UpperCAmelCase_ = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModel)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="token classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="image classification"
)
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")
class __UpperCamelCase ( _BaseAutoModelClass ):
__A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
UpperCAmelCase_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
) | 32 | 0 |
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase ( lowercase_ : list[int] , lowercase_ : int ) -> list[int]:
'''simple docstring'''
lowercase =0
lowercase =len(SCREAMING_SNAKE_CASE_ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowercase =i + 1
else:
lowercase =j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 72 |
import baseaa
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode('''utf-8''' ) )
def A__ ( SCREAMING_SNAKE_CASE_ : bytes ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE_ ).decode('''utf-8''' )
if __name__ == "__main__":
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = baseaa_encode(test)
print(encoded)
UpperCAmelCase_ = baseaa_decode(encoded)
print(decoded) | 32 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowercase: Union[str, Any] = '''src/diffusers'''
_lowercase: Dict = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase: str = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase: Dict = spec.loader.load_module()
def _lowerCamelCase ( snake_case , snake_case ):
return line.startswith(SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , SCREAMING_SNAKE_CASE_ ) is not None
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = object_name.split('.' )
_lowerCAmelCase = 0
# First let's find the module where our object lives.
_lowerCAmelCase = parts[i]
while i < len(SCREAMING_SNAKE_CASE_ ) and not os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE_ , F'{module}.py' ) ):
i += 1
if i < len(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , parts[i] )
if i >= len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(SCREAMING_SNAKE_CASE_ , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase = f.readlines()
# Now let's find the class / func in the code!
_lowerCAmelCase = ''
_lowerCAmelCase = 0
for name in parts[i + 1 :]:
while (
line_index < len(SCREAMING_SNAKE_CASE_ ) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCAmelCase = line_index
while line_index < len(SCREAMING_SNAKE_CASE_ ) and _should_continue(lines[line_index] , SCREAMING_SNAKE_CASE_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase = lines[start_index:line_index]
return "".join(SCREAMING_SNAKE_CASE_ )
_lowercase: str = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_lowercase: Union[str, Any] = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_lowercase: str = re.compile(R'''<FILL\s+[^>]*>''')
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = code.split('\n' )
_lowerCAmelCase = 0
while idx < len(SCREAMING_SNAKE_CASE_ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(SCREAMING_SNAKE_CASE_ ):
return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = len(get_indent(SCREAMING_SNAKE_CASE_ ) ) > 0
if has_indent:
_lowerCAmelCase = F'class Bla:\n{code}'
_lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = black.format_str(SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase , _lowerCAmelCase = style_docstrings_in_code(SCREAMING_SNAKE_CASE_ )
return result[len('class Bla:\n' ) :] if has_indent else result
def _lowerCamelCase ( snake_case , snake_case=False ):
with open(SCREAMING_SNAKE_CASE_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
_lowerCAmelCase = f.readlines()
_lowerCAmelCase = []
_lowerCAmelCase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = search.groups()
_lowerCAmelCase = find_code_in_diffusers(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = get_indent(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = line_index + 1 if indent == theoretical_indent else line_index + 2
_lowerCAmelCase = theoretical_indent
_lowerCAmelCase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_lowerCAmelCase = True
while line_index < len(SCREAMING_SNAKE_CASE_ ) and should_continue:
line_index += 1
if line_index >= len(SCREAMING_SNAKE_CASE_ ):
break
_lowerCAmelCase = lines[line_index]
_lowerCAmelCase = _should_continue(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and re.search(F'^{indent}# End copy' , SCREAMING_SNAKE_CASE_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase = lines[start_index:line_index]
_lowerCAmelCase = ''.join(SCREAMING_SNAKE_CASE_ )
# Remove any nested `Copied from` comments to avoid circular copies
_lowerCAmelCase = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(SCREAMING_SNAKE_CASE_ ) is None]
_lowerCAmelCase = '\n'.join(SCREAMING_SNAKE_CASE_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(SCREAMING_SNAKE_CASE_ ) > 0:
_lowerCAmelCase = replace_pattern.replace('with' , '' ).split(',' )
_lowerCAmelCase = [_re_replace_pattern.search(SCREAMING_SNAKE_CASE_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = pattern.groups()
_lowerCAmelCase = re.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if option.strip() == "all-casing":
_lowerCAmelCase = re.sub(obja.lower() , obja.lower() , SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = re.sub(obja.upper() , obja.upper() , SCREAMING_SNAKE_CASE_ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_lowerCAmelCase = blackify(lines[start_index - 1] + theoretical_code )
_lowerCAmelCase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_lowerCAmelCase = lines[:start_index] + [theoretical_code] + lines[line_index:]
_lowerCAmelCase = start_index + 1
if overwrite and len(SCREAMING_SNAKE_CASE_ ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(SCREAMING_SNAKE_CASE_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
return diffs
def _lowerCamelCase ( snake_case = False ):
_lowerCAmelCase = glob.glob(os.path.join(SCREAMING_SNAKE_CASE_ , '**/*.py' ) , recursive=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = []
for filename in all_files:
_lowerCAmelCase = is_copy_consistent(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(SCREAMING_SNAKE_CASE_ ) > 0:
_lowerCAmelCase = '\n'.join(SCREAMING_SNAKE_CASE_ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
_lowercase: Any = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase: int = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 192 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
__A : int = ["""pixel_values"""]
def __init__( self , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = PILImageResampling.BICUBIC , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1 / 255 , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = None , **_UpperCamelCase , ):
super().__init__(**_UpperCamelCase )
_UpperCAmelCase = size if size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_UpperCAmelCase = get_size_dict(_UpperCamelCase , default_to_square=_UpperCamelCase , param_name='''crop_size''' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = PILImageResampling.BILINEAR , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(_UpperCamelCase , size=size['''shortest_edge'''] , default_to_square=_UpperCamelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(_UpperCamelCase , size=(size['''height'''], size['''width''']) , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase ):
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , **_UpperCamelCase , ):
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = ChannelDimension.FIRST , **_UpperCamelCase , ):
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(_UpperCamelCase , param_name='''crop_size''' , default_to_square=_UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(_UpperCamelCase )
if not is_batched(_UpperCamelCase ):
_UpperCAmelCase = [images]
if not valid_images(_UpperCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(_UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=_UpperCamelCase , size=_UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
_UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_UpperCamelCase , tensor_type=_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _UpperCamelCase ( __UpperCamelCase ) -> Any: # picklable for multiprocessing
return x.sum()
def _UpperCamelCase ( __UpperCamelCase ) -> Union[str, Any]: # picklable for multiprocessing
return i + 1
@dataclass
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
class UpperCAmelCase ( A__ ):
'''simple docstring'''
def UpperCamelCase( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = {}
lowerCamelCase_ = []
lowerCamelCase_ = 1
lowerCamelCase_ = [1, 2]
lowerCamelCase_ = {'a': 1, 'b': 2}
lowerCamelCase_ = {'a': [1, 2], 'b': [3, 4]}
lowerCamelCase_ = {'a': {'1': 1}, 'b': 2}
lowerCamelCase_ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
lowerCamelCase_ = {}
lowerCamelCase_ = []
lowerCamelCase_ = 2
lowerCamelCase_ = [2, 3]
lowerCamelCase_ = {'a': 2, 'b': 3}
lowerCamelCase_ = {'a': [2, 3], 'b': [4, 5]}
lowerCamelCase_ = {'a': {'1': 2}, 'b': 3}
lowerCamelCase_ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
lowerCamelCase_ = 2
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
lowerCamelCase_ = {'a': np.eye(2 ), 'b': np.zeros(3 ), 'c': np.ones(2 )}
lowerCamelCase_ = {'a': 2, 'b': 0, 'c': 2}
lowerCamelCase_ = {
'a': np.eye(2 ).astype(_UpperCamelCase ),
'b': np.zeros(3 ).astype(_UpperCamelCase ),
'c': np.ones(2 ).astype(_UpperCamelCase ),
}
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase , num_proc=_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_UpperCamelCase , _UpperCamelCase , map_numpy=_UpperCamelCase , num_proc=_UpperCamelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_UpperCamelCase ): # can't pickle a local lambda
map_nested(lambda SCREAMING_SNAKE_CASE_ : x + 1 , _UpperCamelCase , num_proc=_UpperCamelCase )
def UpperCamelCase( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = {'a': 1, 'b': 2}
lowerCamelCase_ = {'a': 3, 'b': 4}
lowerCamelCase_ = {'a': 5, 'b': 6}
lowerCamelCase_ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ) , _UpperCamelCase )
def UpperCamelCase( self ) -> str:
'''simple docstring'''
class UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = """bar"""
lowerCamelCase_ = Foo()
self.assertEqual(foo.my_attr , 'bar' )
with temporary_assignment(_UpperCamelCase , 'my_attr' , 'BAR' ):
self.assertEqual(foo.my_attr , 'BAR' )
self.assertEqual(foo.my_attr , 'bar' )
@pytest.mark.parametrize(
'iterable_length, num_proc, expected_num_proc' ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple:
with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch(
'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool:
lowerCamelCase_ = {f'''{i}''': i for i in range(SCREAMING_SNAKE_CASE_ )}
lowerCamelCase_ = map_nested(lambda __UpperCamelCase : x + 10 ,SCREAMING_SNAKE_CASE_ ,num_proc=SCREAMING_SNAKE_CASE_ ,parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class UpperCAmelCase ( A__ ):
'''simple docstring'''
@require_tf
def UpperCamelCase( self ) -> Any:
'''simple docstring'''
import tensorflow as tf
from tensorflow.keras import layers
lowerCamelCase_ = layers.Dense(2 )
def gen_random_output():
lowerCamelCase_ = tf.random.uniform((1, 3) )
return model(_UpperCamelCase ).numpy()
with temp_seed(42 , set_tensorflow=_UpperCamelCase ):
lowerCamelCase_ = gen_random_output()
with temp_seed(42 , set_tensorflow=_UpperCamelCase ):
lowerCamelCase_ = gen_random_output()
lowerCamelCase_ = gen_random_output()
np.testing.assert_equal(_UpperCamelCase , _UpperCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def UpperCamelCase( self ) -> Union[str, Any]:
'''simple docstring'''
import torch
def gen_random_output():
lowerCamelCase_ = torch.nn.Linear(3 , 2 )
lowerCamelCase_ = torch.rand(1 , 3 )
return model(_UpperCamelCase ).detach().numpy()
with temp_seed(42 , set_pytorch=_UpperCamelCase ):
lowerCamelCase_ = gen_random_output()
with temp_seed(42 , set_pytorch=_UpperCamelCase ):
lowerCamelCase_ = gen_random_output()
lowerCamelCase_ = gen_random_output()
np.testing.assert_equal(_UpperCamelCase , _UpperCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def UpperCamelCase( self ) -> Dict:
'''simple docstring'''
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(42 ):
lowerCamelCase_ = gen_random_output()
with temp_seed(42 ):
lowerCamelCase_ = gen_random_output()
lowerCamelCase_ = gen_random_output()
np.testing.assert_equal(_UpperCamelCase , _UpperCamelCase )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('input_data' ,[{}] )
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
lowerCamelCase_ = NestedDataStructure(SCREAMING_SNAKE_CASE_ ).data
assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' ,[
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] ,)
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = NestedDataStructure(SCREAMING_SNAKE_CASE_ ).flatten()
assert output == expected_output
def _UpperCamelCase ( ) -> int:
lowerCamelCase_ = A(x=1 ,y='foobar' )
lowerCamelCase_ = {'x': 1, 'y': 'foobar'}
assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output
lowerCamelCase_ = {'a': {'b': A(x=10 ,y='foo' )}, 'c': [A(x=20 ,y='bar' )]}
lowerCamelCase_ = {'a': {'b': {'x': 10, 'y': 'foo'}}, 'c': [{'x': 20, 'y': 'bar'}]}
assert asdict(SCREAMING_SNAKE_CASE_ ) == expected_output
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
asdict([1, A(x=10 ,y='foo' )] )
def _UpperCamelCase ( __UpperCamelCase ) -> int:
return text.split()
def _UpperCamelCase ( __UpperCamelCase ) -> Any:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _UpperCamelCase ( ) -> str:
with Pool(2 ) as pool:
lowerCamelCase_ = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ ,_split_text ,kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(SCREAMING_SNAKE_CASE_ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
lowerCamelCase_ = list(iflatmap_unordered(SCREAMING_SNAKE_CASE_ ,_split_text ,kwargs_iterable=[{'text': 'hello there'}] * 10 ) )
assert out.count('hello' ) == 10
assert out.count('there' ) == 10
assert len(SCREAMING_SNAKE_CASE_ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
lowerCamelCase_ = []
for yield_time, content in iflatmap_unordered(
SCREAMING_SNAKE_CASE_ ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(SCREAMING_SNAKE_CASE_ )
assert out.count('a' ) == 2
assert out.count('b' ) == 2
assert len(SCREAMING_SNAKE_CASE_ ) == 4
| 42 |
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=A__ ):
__A : str = ["""torch""", """scipy"""]
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def UpperCamelCase( cls , *_UpperCamelCase , **_UpperCamelCase ):
requires_backends(cls , ['''torch''', '''scipy'''] ) | 32 | 0 |
snake_case : Union[str, Any] = '''\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'''
snake_case : Optional[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
snake_case : Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 335 |
def A__ ( SCREAMING_SNAKE_CASE_ : int = 2_00_00_00 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(n + 1 )]
_UpperCAmelCase = 1
_UpperCAmelCase = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , SCREAMING_SNAKE_CASE_ ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
for i in range(SCREAMING_SNAKE_CASE_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(f'''{solution() = }''') | 32 | 0 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase__: List[Any] = get_logger(__name__)
class snake_case_ :
def __init__( self , __lowerCAmelCase = None ):
SCREAMING_SNAKE_CASE_ : Dict = (
os.path.join(_UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
SCREAMING_SNAKE_CASE_ : str = Extractor
def __A ( self , __lowerCAmelCase ):
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.abspath(_UpperCamelCase )
return os.path.join(self.extract_dir , hash_url_to_filename(_UpperCamelCase ) )
def __A ( self , __lowerCAmelCase , __lowerCAmelCase ):
return force_extract or (
not os.path.isfile(_UpperCamelCase ) and not (os.path.isdir(_UpperCamelCase ) and os.listdir(_UpperCamelCase ))
)
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = False ):
SCREAMING_SNAKE_CASE_ : int = self.extractor.infer_extractor_format(_UpperCamelCase )
if not extractor_format:
return input_path
SCREAMING_SNAKE_CASE_ : Dict = self._get_output_path(_UpperCamelCase )
if self._do_extract(_UpperCamelCase , _UpperCamelCase ):
self.extractor.extract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return output_path
class snake_case_ ( A__ ):
@classmethod
@abstractmethod
def __A ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
...
@staticmethod
@abstractmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
...
class snake_case_ ( A__ , A__ ):
__lowerCamelCase : List[bytes] = []
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
with open(_UpperCamelCase , 'rb' ) as f:
return f.read(_UpperCamelCase )
@classmethod
def __A ( cls , __lowerCAmelCase , __lowerCAmelCase = b"" ):
if not magic_number:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max(len(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
try:
SCREAMING_SNAKE_CASE_ : Dict = cls.read_magic_number(_UpperCamelCase , _UpperCamelCase )
except OSError:
return False
return any(magic_number.startswith(_UpperCamelCase ) for cls_magic_number in cls.magic_numbers )
class snake_case_ ( A__ ):
@classmethod
def __A ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
return tarfile.is_tarfile(_UpperCamelCase )
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
def resolved(__lowerCAmelCase ) -> str:
return os.path.realpath(os.path.abspath(_UpperCamelCase ) )
def badpath(__lowerCAmelCase , __lowerCAmelCase ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(_UpperCamelCase , _UpperCamelCase ) ).startswith(_UpperCamelCase )
def badlink(__lowerCAmelCase , __lowerCAmelCase ) -> bool:
# Links are interpreted relative to the directory containing the link
SCREAMING_SNAKE_CASE_ : str = resolved(os.path.join(_UpperCamelCase , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : int = resolved(_UpperCamelCase )
for finfo in members:
if badpath(finfo.name , _UpperCamelCase ):
logger.error(F'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(F'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(_UpperCamelCase , _UpperCamelCase ):
logger.error(F'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Dict = tarfile.open(_UpperCamelCase )
tar_file.extractall(_UpperCamelCase , members=TarExtractor.safemembers(_UpperCamelCase , _UpperCamelCase ) )
tar_file.close()
class snake_case_ ( A__ ):
__lowerCamelCase : int = [b"""\x1F\x8B"""]
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
with gzip.open(_UpperCamelCase , 'rb' ) as gzip_file:
with open(_UpperCamelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( A__ ):
__lowerCamelCase : Optional[Any] = [
b"""PK\x03\x04""",
b"""PK\x05\x06""", # empty archive
b"""PK\x07\x08""", # spanned archive
]
@classmethod
def __A ( cls , __lowerCAmelCase , __lowerCAmelCase = b"" ):
if super().is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(_UpperCamelCase , 'rb' ) as fp:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _EndRecData(_UpperCamelCase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
SCREAMING_SNAKE_CASE_ : Dict = fp.read(_UpperCamelCase ) # CD is where we expect it to be
if len(_UpperCamelCase ) == sizeCentralDir:
SCREAMING_SNAKE_CASE_ : Optional[int] = struct.unpack(_UpperCamelCase , _UpperCamelCase ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with zipfile.ZipFile(_UpperCamelCase , 'r' ) as zip_file:
zip_file.extractall(_UpperCamelCase )
zip_file.close()
class snake_case_ ( A__ ):
__lowerCamelCase : str = [b"""\xFD\x37\x7A\x58\x5A\x00"""]
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
with lzma.open(_UpperCamelCase ) as compressed_file:
with open(_UpperCamelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( A__ ):
__lowerCamelCase : str = [b"""Rar!\x1a\x07\x00""", b"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
if not config.RARFILE_AVAILABLE:
raise ImportError('Please pip install rarfile' )
import rarfile
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
SCREAMING_SNAKE_CASE_ : Any = rarfile.RarFile(_UpperCamelCase )
rf.extractall(_UpperCamelCase )
rf.close()
class snake_case_ ( A__ ):
__lowerCamelCase : Any = [b"""\x28\xb5\x2F\xFD"""]
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('Please pip install zstandard' )
import zstandard as zstd
SCREAMING_SNAKE_CASE_ : str = zstd.ZstdDecompressor()
with open(_UpperCamelCase , 'rb' ) as ifh, open(_UpperCamelCase , 'wb' ) as ofh:
dctx.copy_stream(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( A__ ):
__lowerCamelCase : Tuple = [b"""\x42\x5A\x68"""]
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
with bza.open(_UpperCamelCase , 'rb' ) as compressed_file:
with open(_UpperCamelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ ( A__ ):
__lowerCamelCase : Union[str, Any] = [b"""\x37\x7A\xBC\xAF\x27\x1C"""]
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
if not config.PY7ZR_AVAILABLE:
raise ImportError('Please pip install py7zr' )
import pyazr
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
with pyazr.SevenZipFile(_UpperCamelCase , 'r' ) as archive:
archive.extractall(_UpperCamelCase )
class snake_case_ ( A__ ):
__lowerCamelCase : Optional[Any] = [b"""\x04\x22\x4D\x18"""]
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
if not config.LZ4_AVAILABLE:
raise ImportError('Please pip install lz4' )
import lza.frame
with lza.frame.open(_UpperCamelCase , 'rb' ) as compressed_file:
with open(_UpperCamelCase , 'wb' ) as extracted_file:
shutil.copyfileobj(_UpperCamelCase , _UpperCamelCase )
class snake_case_ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
__lowerCamelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def __A ( cls ):
return max(
len(_UpperCamelCase )
for extractor in cls.extractors.values()
if issubclass(_UpperCamelCase , _UpperCamelCase )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def __A ( __lowerCAmelCase , __lowerCAmelCase ):
try:
return MagicNumberBaseExtractor.read_magic_number(_UpperCamelCase , magic_number_length=_UpperCamelCase )
except OSError:
return b""
@classmethod
def __A ( cls , __lowerCAmelCase , __lowerCAmelCase = False ):
warnings.warn(
'Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'infer_extractor_format\' instead.' , category=_UpperCamelCase , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = cls.infer_extractor_format(_UpperCamelCase )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def __A ( cls , __lowerCAmelCase ): # <Added version="2.4.0"/>
SCREAMING_SNAKE_CASE_ : List[str] = cls._get_magic_number_max_length()
SCREAMING_SNAKE_CASE_ : Optional[int] = cls._read_magic_number(_UpperCamelCase , _UpperCamelCase )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(_UpperCamelCase , magic_number=_UpperCamelCase ):
return extractor_format
@classmethod
def __A ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = "deprecated" , ):
os.makedirs(os.path.dirname(_UpperCamelCase ) , exist_ok=_UpperCamelCase )
# Prevent parallel extractions
SCREAMING_SNAKE_CASE_ : Dict = str(Path(_UpperCamelCase ).with_suffix('.lock' ) )
with FileLock(_UpperCamelCase ):
shutil.rmtree(_UpperCamelCase , ignore_errors=_UpperCamelCase )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(_UpperCamelCase , _UpperCamelCase ): # passed as positional arg
warnings.warn(
'Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '
'Use \'extractor_format\' instead.' , category=_UpperCamelCase , )
SCREAMING_SNAKE_CASE_ : Dict = extractor if extractor != 'deprecated' else extractor_format
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = cls.extractors[extractor_format]
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
else:
warnings.warn(
'Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '
'exception in 3.0.0.' , category=_UpperCamelCase , )
for extractor in cls.extractors.values():
if extractor.is_extractable(_UpperCamelCase ):
return extractor.extract(_UpperCamelCase , _UpperCamelCase )
| 345 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCAmelCase_ = logging.get_logger(__name__)
class __UpperCamelCase ( A__ ):
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, F"""{torch_layer} layer.weight does not match"""
__magic_name__ : Optional[int] = nn.Parameter(SCREAMING_SNAKE_CASE_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"""{torch_layer} layer.bias does not match"""
__magic_name__ : Union[str, Any] = nn.Parameter(SCREAMING_SNAKE_CASE_ )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ : Optional[Any] = np.asarray(weights[0] )
__magic_name__ : int = np.asarray(weights[1] )
__magic_name__ : Optional[int] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ : List[str] = np.asarray(weights[0] )
__magic_name__ : str = np.asarray(weights[1] )
__magic_name__ : int = np.asarray(weights[2] )
__magic_name__ : Tuple = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).contiguous().view(-1 , SCREAMING_SNAKE_CASE_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).view(-1 , SCREAMING_SNAKE_CASE_ ).contiguous().transpose(0 , 1 ) , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ : List[str] = weights[0][0][0]
__magic_name__ : Union[str, Any] = np.asarray(layer_norm_a[0] )
__magic_name__ : int = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# lsh weights + output
__magic_name__ : int = weights[0][1]
if len(SCREAMING_SNAKE_CASE_ ) < 4:
set_layer_weights_in_torch_lsh(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ )
else:
set_layer_weights_in_torch_local(SCREAMING_SNAKE_CASE_ , torch_block.attention , SCREAMING_SNAKE_CASE_ )
# intermediate weighs
__magic_name__ : Dict = weights[2][0][1][2]
# Chunked Feed Forward
if len(SCREAMING_SNAKE_CASE_ ) == 4:
__magic_name__ : Union[str, Any] = intermediate_weights[2]
# layernorm 2
__magic_name__ : Dict = np.asarray(intermediate_weights[0][0] )
__magic_name__ : Tuple = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# intermediate dense
__magic_name__ : Dict = np.asarray(intermediate_weights[1][0] )
__magic_name__ : Dict = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# intermediate out
__magic_name__ : Optional[int] = np.asarray(intermediate_weights[4][0] )
__magic_name__ : List[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ : int = torch_model.reformer
# word embeds
__magic_name__ : List[str] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
if isinstance(weights[3] , SCREAMING_SNAKE_CASE_ ):
__magic_name__ : Optional[int] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
__magic_name__ : Dict = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"""{position_embeddings[emb_idx]} emb does not match"""
__magic_name__ : Optional[int] = nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE_ ) )
__magic_name__ : str = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
SCREAMING_SNAKE_CASE_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
__magic_name__ : Optional[int] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# output layer norm
__magic_name__ : Optional[int] = np.asarray(weights[7][0] )
__magic_name__ : List[str] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(SCREAMING_SNAKE_CASE_ ) , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
# output embeddings
__magic_name__ : Optional[Any] = np.asarray(weights[9][0] )
__magic_name__ : Union[str, Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(SCREAMING_SNAKE_CASE_ ).transpose(0 , 1 ).contiguous() , torch.tensor(SCREAMING_SNAKE_CASE_ ) , )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
__magic_name__ : str = ReformerConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F"""Building PyTorch model from configuration: {config}""" )
__magic_name__ : Any = ReformerModelWithLMHead(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , "rb" ) as f:
__magic_name__ : Dict = pickle.load(SCREAMING_SNAKE_CASE_ )["weights"]
set_model_weights_in_torch(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , config.hidden_size )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 436 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __UpperCamelCase ( A__ ):
__A : Dict = ["""image_processor""", """tokenizer"""]
__A : List[str] = """BridgeTowerImageProcessor"""
__A : str = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , _UpperCamelCase , _UpperCamelCase ):
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = True , _UpperCamelCase = False , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = False , _UpperCamelCase = True , _UpperCamelCase = None , **_UpperCamelCase , ):
_UpperCAmelCase = self.tokenizer(
text=_UpperCamelCase , add_special_tokens=_UpperCamelCase , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=_UpperCamelCase , stride=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_token_type_ids=_UpperCamelCase , return_attention_mask=_UpperCamelCase , return_overflowing_tokens=_UpperCamelCase , return_special_tokens_mask=_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , return_length=_UpperCamelCase , verbose=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase , )
# add pixel_values + pixel_mask
_UpperCAmelCase = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , do_normalize=_UpperCamelCase , do_center_crop=_UpperCamelCase , **_UpperCamelCase )
encoding.update(_UpperCamelCase )
return encoding
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def UpperCamelCase( self , *_UpperCamelCase , **_UpperCamelCase ):
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.model_input_names
_UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 32 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class snake_case_ :
'''simple docstring'''
def __init__( self, A_, A_=13, A_=10, A_=3, A_=2, A_=2, A_=True, A_=True, A_=32, A_=5, A_=4, A_=37, A_="gelu", A_=0.1, A_=0.1, A_=10, A_=0.02, A_="divided_space_time", A_=None, ) -> Optional[int]:
UpperCAmelCase__ =parent
UpperCAmelCase__ =batch_size
UpperCAmelCase__ =image_size
UpperCAmelCase__ =num_channels
UpperCAmelCase__ =patch_size
UpperCAmelCase__ =num_frames
UpperCAmelCase__ =is_training
UpperCAmelCase__ =use_labels
UpperCAmelCase__ =hidden_size
UpperCAmelCase__ =num_hidden_layers
UpperCAmelCase__ =num_attention_heads
UpperCAmelCase__ =intermediate_size
UpperCAmelCase__ =hidden_act
UpperCAmelCase__ =hidden_dropout_prob
UpperCAmelCase__ =attention_probs_dropout_prob
UpperCAmelCase__ =attention_type
UpperCAmelCase__ =initializer_range
UpperCAmelCase__ =scope
UpperCAmelCase__ =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
UpperCAmelCase__ =(image_size // patch_size) ** 2
UpperCAmelCase__ =(num_frames) * self.num_patches_per_frame + 1
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase__ =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ =None
if self.use_labels:
UpperCAmelCase__ =ids_tensor([self.batch_size], self.num_labels )
UpperCAmelCase__ =self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ =TimesformerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, )
UpperCAmelCase__ =self.num_labels
return config
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Union[str, Any]:
UpperCAmelCase__ =TimesformerModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase__ =model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self, A_, A_, A_ ) -> Optional[Any]:
UpperCAmelCase__ =TimesformerForVideoClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
UpperCAmelCase__ =model(_UpperCamelCase )
# verify the logits shape
UpperCAmelCase__ =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape, _UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ =config_and_inputs
UpperCAmelCase__ ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case_ ( A__, A__, unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__UpperCamelCase = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =TimesformerModelTester(self )
UpperCAmelCase__ =ConfigTester(
self, config_class=_UpperCamelCase, has_text_modality=_UpperCamelCase, hidden_size=37 )
def __UpperCAmelCase ( self, A_, A_, A_=False ) -> Optional[Any]:
UpperCAmelCase__ =copy.deepcopy(_UpperCamelCase )
if return_labels:
if model_class in get_values(_UpperCamelCase ):
UpperCAmelCase__ =torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_UpperCamelCase )
return inputs_dict
def __UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def __UpperCAmelCase ( self ) -> str:
pass
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
UpperCAmelCase__ =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase, nn.Linear ) )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =model_class(_UpperCamelCase )
UpperCAmelCase__ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ =[*signature.parameters.keys()]
UpperCAmelCase__ =["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_UpperCamelCase )
@slow
def __UpperCAmelCase ( self ) -> int:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ =TimesformerModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def __UpperCAmelCase ( self ) -> List[Any]:
if not self.has_attentions:
pass
else:
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ =True
for model_class in self.all_model_classes:
UpperCAmelCase__ =self.model_tester.seq_length
UpperCAmelCase__ =self.model_tester.num_frames
UpperCAmelCase__ =True
UpperCAmelCase__ =False
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(_UpperCamelCase, _UpperCamelCase ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(_UpperCamelCase ), self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(_UpperCamelCase, _UpperCamelCase ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(_UpperCamelCase ), self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
UpperCAmelCase__ =len(_UpperCamelCase )
# Check attention is always last and order is fine
UpperCAmelCase__ =True
UpperCAmelCase__ =True
UpperCAmelCase__ =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(_UpperCamelCase, _UpperCamelCase ) )
self.assertEqual(out_len + 1, len(_UpperCamelCase ) )
UpperCAmelCase__ =outputs.attentions
self.assertEqual(len(_UpperCamelCase ), self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
def __UpperCAmelCase ( self ) -> str:
def check_hidden_states_output(A_, A_, A_ ):
UpperCAmelCase__ =model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase__ =model(**self._prepare_for_class(_UpperCamelCase, _UpperCamelCase ) )
UpperCAmelCase__ =outputs.hidden_states
UpperCAmelCase__ =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCamelCase ), _UpperCamelCase )
UpperCAmelCase__ =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
UpperCAmelCase__ , UpperCAmelCase__ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ =True
check_hidden_states_output(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ =True
check_hidden_states_output(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
def _UpperCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ =hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
UpperCAmelCase__ =np.load(SCREAMING_SNAKE_CASE_ )
return list(SCREAMING_SNAKE_CASE_ )
@require_torch
@require_vision
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ) -> int:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase__ =TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
_UpperCamelCase )
UpperCAmelCase__ =self.default_image_processor
UpperCAmelCase__ =prepare_video()
UpperCAmelCase__ =image_processor(video[:8], return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase__ =model(**_UpperCamelCase )
# verify the logits
UpperCAmelCase__ =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, _UpperCamelCase )
UpperCAmelCase__ =torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _UpperCamelCase, atol=1E-4 ) )
| 625 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase_ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 32 | 0 |
"""simple docstring"""
def A ( _A = 1_000 ):
"""simple docstring"""
snake_case_ , snake_case_ :Union[str, Any] = 1, 1
snake_case_ :List[str] = []
for i in range(1, n + 1 ):
snake_case_ :Any = prev_numerator + 2 * prev_denominator
snake_case_ :Union[str, Any] = prev_numerator + prev_denominator
if len(str(SCREAMING_SNAKE_CASE_ ) ) > len(str(SCREAMING_SNAKE_CASE_ ) ):
result.append(SCREAMING_SNAKE_CASE_ )
snake_case_ :Tuple = numerator
snake_case_ :Tuple = denominator
return len(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 584 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , A__ ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Optional[Any] = load_tool("""text-classification""" )
self.tool.setup()
A : Optional[int] = load_tool("""text-classification""", remote=_UpperCamelCase )
def _lowerCAmelCase ( self ):
A : int = self.tool("""That\'s quite cool""", ["""positive""", """negative"""] )
self.assertEqual(_UpperCamelCase, """positive""" )
def _lowerCAmelCase ( self ):
A : Optional[int] = self.remote_tool("""That\'s quite cool""", ["""positive""", """negative"""] )
self.assertEqual(_UpperCamelCase, """positive""" )
def _lowerCAmelCase ( self ):
A : Any = self.tool(text="""That\'s quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(_UpperCamelCase, """positive""" )
def _lowerCAmelCase ( self ):
A : Tuple = self.remote_tool(text="""That\'s quite cool""", labels=["""positive""", """negative"""] )
self.assertEqual(_UpperCamelCase, """positive""" )
| 662 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[str]:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : List[Any] ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def A__ ( *SCREAMING_SNAKE_CASE_ : List[str] ) -> Dict:
"""simple docstring"""
def decorator(SCREAMING_SNAKE_CASE_ : Any ):
_UpperCAmelCase = getattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , '''handle_key''' , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class __UpperCamelCase ( A__ ):
def __new__( cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
_UpperCAmelCase = super().__new__(cls , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if not hasattr(_UpperCamelCase , '''key_handler''' ):
setattr(_UpperCamelCase , '''key_handler''' , {} )
setattr(_UpperCamelCase , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(_UpperCamelCase , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(_UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(_UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def A__ ( cls : Union[str, Any] ) -> Any:
"""simple docstring"""
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 32 | 0 |
from ..utils import DummyObject, requires_backends
class A_ (metaclass=A__ ):
UpperCAmelCase__ = ["""torch""", """scipy"""]
def __init__( self , *_A , **_A ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''scipy'''] )
@classmethod
def _lowercase ( cls , *_A , **_A ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] )
@classmethod
def _lowercase ( cls , *_A , **_A ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''scipy'''] )
| 130 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) ) | 32 | 0 |
"""simple docstring"""
_UpperCamelCase = [
(1000, """M"""),
(900, """CM"""),
(500, """D"""),
(400, """CD"""),
(100, """C"""),
(90, """XC"""),
(50, """L"""),
(40, """XL"""),
(10, """X"""),
(9, """IX"""),
(5, """V"""),
(4, """IV"""),
(1, """I"""),
]
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {"""I""": 1, """V""": 5, """X""": 10, """L""": 50, """C""": 100, """D""": 500, """M""": 1000}
UpperCAmelCase = 0
UpperCAmelCase = 0
while place < len(SCREAMING_SNAKE_CASE_ ):
if (place + 1 < len(SCREAMING_SNAKE_CASE_ )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
for arabic, roman in ROMAN:
((UpperCAmelCase) , (UpperCAmelCase)) = divmod(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
result.append(roman * factor )
if number == 0:
break
return "".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __UpperCamelCase ( A__ ):
__A : Tuple = """rwkv"""
__A : Any = {"""max_position_embeddings""": """context_length"""}
def __init__( self , _UpperCamelCase=50277 , _UpperCamelCase=1024 , _UpperCamelCase=4096 , _UpperCamelCase=32 , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=1e-5 , _UpperCamelCase=0 , _UpperCamelCase=0 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=True , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = context_length
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCAmelCase = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = rescale_every
_UpperCAmelCase = use_cache
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
super().__init__(
tie_word_embeddings=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase ) | 32 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.