code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
a : Optional[Any] = logging.get_logger(__name__)
a : Any = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
a : Optional[int] = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
a : Union[str, Any] = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
a : Union[str, Any] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
a : List[str] = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
a : Optional[int] = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
a : str = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
a : Any = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
a : int = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
a : List[str] = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
a : Any = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
a : Optional[int] = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
a : List[Any] = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
a : List[Any] = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
a : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
a : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
a : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
a : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
a : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
a : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
a : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
a : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
a : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
a : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
a : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
a : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
a : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
a : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_MAPPING
a : Union[str, Any] = auto_class_update(FlaxAutoModel)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_PRETRAINING_MAPPING
a : Dict = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
a : List[str] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MASKED_LM_MAPPING
a : Union[str, Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : Optional[int] = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a : str = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
a : Optional[int] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
a : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
a : List[str] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
a : Tuple = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
a : Dict = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
a : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class SCREAMING_SNAKE_CASE__ ( _BaseAutoModelClass ):
__SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
a : Optional[Any] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 705 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
__snake_case = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> Optional[int]:
return EnvironmentCommand()
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> str:
return EnvironmentCommand(args.accelerate_config_file )
class SCREAMING_SNAKE_CASE__ ( __lowercase ):
@staticmethod
def A ( a_ : ArgumentParser ):
"""simple docstring"""
__snake_case = parser.add_parser("env" )
download_parser.set_defaults(func=a_ )
download_parser.add_argument(
"--accelerate-config_file" , default=a_ , help="The accelerate config file to use for the default values in the launching script." , )
download_parser.set_defaults(func=a_ )
def __init__( self : int , a_ : List[str] , *a_ : Any ):
"""simple docstring"""
__snake_case = accelerate_config_file
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = "not installed"
if is_safetensors_available():
import safetensors
__snake_case = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
__snake_case = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__snake_case = "not installed"
__snake_case = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__snake_case = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(a_ ):
__snake_case = load_config_from_file(self._accelerate_config_file ).to_dict()
__snake_case = (
"\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(a_ , a_ )
else f'''\t{accelerate_config}'''
)
__snake_case = "not installed"
__snake_case = "NA"
if is_torch_available():
import torch
__snake_case = torch.__version__
__snake_case = torch.cuda.is_available()
__snake_case = "not installed"
__snake_case = "NA"
if is_tf_available():
import tensorflow as tf
__snake_case = tf.__version__
try:
# deprecated in v2.1
__snake_case = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__snake_case = bool(tf.config.list_physical_devices("GPU" ) )
__snake_case = "not installed"
__snake_case = "not installed"
__snake_case = "not installed"
__snake_case = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
__snake_case = flax.__version__
__snake_case = jax.__version__
__snake_case = jaxlib.__version__
__snake_case = jax.lib.xla_bridge.get_backend().platform
__snake_case = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f'''{safetensors_version}''',
"Accelerate version": f'''{accelerate_version}''',
"Accelerate config": f'''{accelerate_config_str}''',
"PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
"Tensorflow version (GPU?)": f'''{tf_version} ({tf_cuda_available})''',
"Flax version (CPU?/GPU?/TPU?)": f'''{flax_version} ({jax_backend})''',
"Jax version": f'''{jax_version}''',
"JaxLib version": f'''{jaxlib_version}''',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a_ ) )
return info
@staticmethod
def A ( a_ : Optional[int] ):
"""simple docstring"""
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 0 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
# TODO Update this
a : Tuple = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """esm"""
def __init__( self : Any , a_ : List[Any]=None , a_ : Dict=None , a_ : List[str]=None , a_ : str=768 , a_ : Tuple=12 , a_ : Tuple=12 , a_ : Optional[int]=3_072 , a_ : Tuple=0.1 , a_ : Tuple=0.1 , a_ : Optional[Any]=1_026 , a_ : List[str]=0.02 , a_ : str=1e-12 , a_ : List[Any]="absolute" , a_ : Optional[Any]=True , a_ : int=None , a_ : Any=False , a_ : Dict=False , a_ : Optional[Any]=None , a_ : int=None , **a_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=__UpperCamelCase , mask_token_id=__UpperCamelCase , **__UpperCamelCase )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = emb_layer_norm_before
__snake_case = token_dropout
__snake_case = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
__snake_case = EsmFoldConfig()
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
__snake_case = EsmFoldConfig(**__UpperCamelCase )
__snake_case = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
__snake_case = get_default_vocab_list()
else:
__snake_case = vocab_list
else:
__snake_case = None
__snake_case = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , __UpperCamelCase ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = super().to_dict()
if isinstance(self.esmfold_config , __UpperCamelCase ):
__snake_case = self.esmfold_config.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = None
def A ( self : Union[str, Any] ):
"""simple docstring"""
if self.trunk is None:
__snake_case = TrunkConfig()
elif isinstance(self.trunk , __UpperCamelCase ):
__snake_case = TrunkConfig(**self.trunk )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = asdict(self )
__snake_case = self.trunk.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = 48
__SCREAMING_SNAKE_CASE = 1024
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = None
def A ( self : str ):
"""simple docstring"""
if self.structure_module is None:
__snake_case = StructureModuleConfig()
elif isinstance(self.structure_module , __UpperCamelCase ):
__snake_case = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
f''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
f''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
__snake_case = self.sequence_state_dim // self.sequence_head_width
__snake_case = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def A ( self : Any ):
"""simple docstring"""
__snake_case = asdict(self )
__snake_case = self.structure_module.to_dict()
return output
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = 384
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = 12
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 8
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = 1E-8
__SCREAMING_SNAKE_CASE = 1E5
def A ( self : List[str] ):
"""simple docstring"""
return asdict(self )
def __UpperCAmelCase ( ) -> Dict:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
a : Optional[int] = NewType('''DataClass''', Any)
a : Tuple = NewType('''DataClassType''', Any)
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> Optional[Any]:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> Callable[[str], Any]:
__snake_case = {str(_UpperCAmelCase ): choice for choice in choices}
return lambda _UpperCAmelCase : str_to_choice.get(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( *,
_UpperCAmelCase : Union[str, List[str]] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : Any = dataclasses.MISSING , _UpperCAmelCase : Callable[[], Any] = dataclasses.MISSING , _UpperCAmelCase : dict = None , **_UpperCAmelCase : Tuple , ) -> dataclasses.Field:
if metadata is None:
# Important, don't use as default param in function signature because dict is mutable and shared across function calls
__snake_case = {}
if aliases is not None:
__snake_case = aliases
if help is not None:
__snake_case = help
return dataclasses.field(metadata=_UpperCAmelCase , default=_UpperCAmelCase , default_factory=_UpperCAmelCase , **_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( A_ ):
__SCREAMING_SNAKE_CASE = 42
def __init__( self : Optional[Any] , a_ : int , **a_ : str ):
"""simple docstring"""
if "formatter_class" not in kwargs:
__snake_case = ArgumentDefaultsHelpFormatter
super().__init__(**a_ )
if dataclasses.is_dataclass(a_ ):
__snake_case = [dataclass_types]
__snake_case = list(a_ )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(a_ )
@staticmethod
def A ( a_ : Optional[int] , a_ : List[str] ):
"""simple docstring"""
__snake_case = f'''--{field.name}'''
__snake_case = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type , a_ ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
__snake_case = kwargs.pop("aliases" , [] )
if isinstance(a_ , a_ ):
__snake_case = [aliases]
__snake_case = getattr(field.type , "__origin__" , field.type )
if origin_type is Union or (hasattr(a_ , "UnionType" ) and isinstance(a_ , types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(a_ ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f''' Problem encountered in field \'{field.name}\'.''' )
if type(a_ ) not in field.type.__args__:
# filter `str` in Union
__snake_case = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__snake_case = getattr(field.type , "__origin__" , field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__snake_case = (
field.type.__args__[0] if isinstance(a_ , field.type.__args__[1] ) else field.type.__args__[1]
)
__snake_case = getattr(field.type , "__origin__" , field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__snake_case = {}
if origin_type is Literal or (isinstance(field.type , a_ ) and issubclass(field.type , a_ )):
if origin_type is Literal:
__snake_case = field.type.__args__
else:
__snake_case = [x.value for x in field.type]
__snake_case = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
__snake_case = field.default
else:
__snake_case = True
elif field.type is bool or field.type == Optional[bool]:
# Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__snake_case = copy(a_ )
# Hack because type=bool in argparse does not behave as we want.
__snake_case = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__snake_case = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__snake_case = default
# This tells argparse we accept 0 or 1 value after --field_name
__snake_case = """?"""
# This is the value that will get picked if we do --field_name (without value)
__snake_case = True
elif isclass(a_ ) and issubclass(a_ , a_ ):
__snake_case = field.type.__args__[0]
__snake_case = """+"""
if field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
elif field.default is dataclasses.MISSING:
__snake_case = True
else:
__snake_case = field.type
if field.default is not dataclasses.MISSING:
__snake_case = field.default
elif field.default_factory is not dataclasses.MISSING:
__snake_case = field.default_factory()
else:
__snake_case = True
parser.add_argument(a_ , *a_ , **a_ )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__snake_case = False
parser.add_argument(f'''--no_{field.name}''' , action="store_false" , dest=field.name , **a_ )
def A ( self : Optional[Any] , a_ : int ):
"""simple docstring"""
if hasattr(a_ , "_argument_group_name" ):
__snake_case = self.add_argument_group(dtype._argument_group_name )
else:
__snake_case = self
try:
__snake_case = get_type_hints(a_ )
except NameError:
raise RuntimeError(
f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(a_ ):
__snake_case = """.""".join(map(a_ , sys.version_info[:3] ) )
raise RuntimeError(
f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(a_ ):
if not field.init:
continue
__snake_case = type_hints[field.name]
self._parse_dataclass_field(a_ , a_ )
def A ( self : Optional[int] , a_ : int=None , a_ : str=False , a_ : Any=True , a_ : List[Any]=None , a_ : int=None , ):
"""simple docstring"""
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__snake_case = []
if args_filename:
args_files.append(Path(a_ ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__snake_case = ArgumentParser()
args_file_parser.add_argument(a_ , type=a_ , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
__snake_case = args_file_parser.parse_known_args(args=a_ )
__snake_case = vars(a_ ).get(args_file_flag.lstrip("-" ) , a_ )
if cmd_args_file_paths:
args_files.extend([Path(a_ ) for p in cmd_args_file_paths] )
__snake_case = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__snake_case = file_args + args if args is not None else file_args + sys.argv[1:]
__snake_case = self.parse_known_args(args=a_ )
__snake_case = []
for dtype in self.dataclass_types:
__snake_case = {f.name for f in dataclasses.fields(a_ ) if f.init}
__snake_case = {k: v for k, v in vars(a_ ).items() if k in keys}
for k in keys:
delattr(a_ , a_ )
__snake_case = dtype(**a_ )
outputs.append(a_ )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(a_ )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def A ( self : str , a_ : Dict , a_ : Optional[Any] = False ):
"""simple docstring"""
__snake_case = set(args.keys() )
__snake_case = []
for dtype in self.dataclass_types:
__snake_case = {f.name for f in dataclasses.fields(a_ ) if f.init}
__snake_case = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__snake_case = dtype(**a_ )
outputs.append(a_ )
if not allow_extra_keys and unused_keys:
raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(a_ )}''' )
return tuple(a_ )
def A ( self : Dict , a_ : Any , a_ : List[Any] = False ):
"""simple docstring"""
with open(Path(a_ ) , encoding="utf-8" ) as open_json_file:
__snake_case = json.loads(open_json_file.read() )
__snake_case = self.parse_dict(a_ , allow_extra_keys=a_ )
return tuple(a_ )
def A ( self : str , a_ : Dict , a_ : int = False ):
"""simple docstring"""
__snake_case = self.parse_dict(yaml.safe_load(Path(a_ ).read_text() ) , allow_extra_keys=a_ )
return tuple(a_ )
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 680 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
a : Union[str, Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class SCREAMING_SNAKE_CASE__ ( __A ):
def __init__( self : Tuple , a_ : Any , a_ : str , a_ : Tuple=None , a_ : List[Any]=1 ):
"""simple docstring"""
__snake_case = tokenizer
__snake_case = dataset
__snake_case = len(UpperCamelCase__ ) if n_tasks is None else n_tasks
__snake_case = n_copies
def __iter__( self : Optional[int] ):
"""simple docstring"""
__snake_case = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
__snake_case = self.tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class SCREAMING_SNAKE_CASE__ ( __A ):
def __init__( self : int , a_ : Any , a_ : List[str] , a_ : List[Any] ):
"""simple docstring"""
__snake_case = start_length
__snake_case = eof_strings
__snake_case = tokenizer
def __call__( self : Optional[Any] , a_ : Union[str, Any] , a_ : Dict , **a_ : Any ):
"""simple docstring"""
__snake_case = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__snake_case = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCamelCase__ )
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> int:
__snake_case = re.split("(%s)" % "|".join(lowerCAmelCase__ ) , lowerCAmelCase__ )
# last string should be ""
return "".join(string_list[:-2] )
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str=20 , **_UpperCAmelCase : int ) -> Optional[int]:
__snake_case = defaultdict(lowerCAmelCase__ ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(lowerCAmelCase__ ) ):
with torch.no_grad():
__snake_case = batch['ids'].shape[-1]
__snake_case = accelerator.unwrap_model(lowerCAmelCase__ ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=lowerCAmelCase__ , **lowerCAmelCase__ )
# each task is generated batch_size times
__snake_case = batch['task_id'].repeat(lowerCAmelCase__ )
__snake_case = accelerator.pad_across_processes(
lowerCAmelCase__ , dim=1 , pad_index=tokenizer.pad_token_id )
__snake_case = accelerator.gather((generated_tokens, generated_tasks) )
__snake_case = generated_tokens.cpu().numpy()
__snake_case = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
gen_token_dict[task].append(lowerCAmelCase__ )
__snake_case = [[] for _ in range(lowerCAmelCase__ )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
__snake_case = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
code_gens[task].append(remove_last_block(lowerCAmelCase__ ) )
return code_gens
def __UpperCAmelCase ( ) -> int:
__snake_case = HfArgumentParser(lowerCAmelCase__ )
__snake_case = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
__snake_case = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
__snake_case = 'false'
if args.num_workers is None:
__snake_case = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
__snake_case = Accelerator()
set_seed(args.seed , device_specific=lowerCAmelCase__ )
# Load model and tokenizer
__snake_case = AutoTokenizer.from_pretrained(args.model_ckpt )
__snake_case = tokenizer.eos_token
__snake_case = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
__snake_case = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , lowerCAmelCase__ , lowerCAmelCase__ )] ),
}
# Load evaluation dataset and metric
__snake_case = load_dataset("openai_humaneval" )
__snake_case = load_metric("code_eval" )
__snake_case = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
__snake_case = args.n_samples // args.batch_size
__snake_case = TokenizedDataset(lowerCAmelCase__ , human_eval["test"] , n_copies=lowerCAmelCase__ , n_tasks=lowerCAmelCase__ )
# do not confuse args.batch_size, which is actually the num_return_sequences
__snake_case = DataLoader(lowerCAmelCase__ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
__snake_case = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
__snake_case = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
__snake_case = complete_code(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , n_tasks=lowerCAmelCase__ , batch_size=args.batch_size , **lowerCAmelCase__ , )
if accelerator.is_main_process:
__snake_case = []
for task in tqdm(range(lowerCAmelCase__ ) ):
__snake_case = human_eval['test'][task]['test']
__snake_case = F'''check({human_eval["test"][task]["entry_point"]})'''
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
__snake_case = code_eval_metric.compute(
references=lowerCAmelCase__ , predictions=lowerCAmelCase__ , num_workers=args.num_workers )
print(F'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = logging.get_logger()
# the current default level is logging.WARNING
__snake_case = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(lowerCamelCase_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = logging.get_verbosity()
__snake_case = logging.get_logger("transformers.models.bart.tokenization_bart" )
__snake_case = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning(lowerCamelCase_ )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning(lowerCamelCase_ )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning(lowerCamelCase_ )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(lowerCamelCase_ )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def A ( self : Any ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
__snake_case = logging.get_logger("transformers.models.bart.tokenization_bart" )
__snake_case = os.getenv("TRANSFORMERS_VERBOSITY" , lowerCamelCase_ )
__snake_case = logging.log_levels[env_level_str]
__snake_case = logging.get_verbosity()
self.assertEqual(
lowerCamelCase_ , lowerCamelCase_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
__snake_case = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def A ( self : List[str] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.logging.getLogger()
with CaptureLogger(lowerCamelCase_ ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def A ( self : str ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
__snake_case = logging.get_logger("transformers.models.bart.tokenization_bart" )
__snake_case = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning_advice(lowerCamelCase_ )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(lowerCamelCase_ ) as cl:
logger.warning_advice(lowerCamelCase_ )
self.assertEqual(cl.out , msg + "\n" )
def __UpperCAmelCase ( ) -> List[Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
__snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = LDMTextToImagePipeline
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS - {
'''negative_prompt''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
'''prompt_embeds''',
}
__SCREAMING_SNAKE_CASE = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = False
def A ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__snake_case = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
__snake_case = CLIPTextModel(UpperCamelCase__ )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__snake_case = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def A ( self : Any , a_ : Optional[int] , a_ : Optional[Any]=0 ):
"""simple docstring"""
if str(UpperCamelCase__ ).startswith("mps" ):
__snake_case = torch.manual_seed(UpperCamelCase__ )
else:
__snake_case = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = LDMTextToImagePipeline(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__snake_case = self.get_dummy_inputs(UpperCamelCase__ )
__snake_case = pipe(**UpperCamelCase__ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__snake_case = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : str , a_ : str , a_ : int=torch.floataa , a_ : Tuple=0 ):
"""simple docstring"""
__snake_case = torch.manual_seed(UpperCamelCase__ )
__snake_case = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 32, 32) )
__snake_case = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__snake_case = self.get_inputs(UpperCamelCase__ )
__snake_case = pipe(**UpperCamelCase__ ).images
__snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__snake_case = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] )
__snake_case = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int] , a_ : List[Any] , a_ : Optional[Any]=torch.floataa , a_ : int=0 ):
"""simple docstring"""
__snake_case = torch.manual_seed(UpperCamelCase__ )
__snake_case = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 32, 32) )
__snake_case = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A ( self : Any ):
"""simple docstring"""
__snake_case = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
__snake_case = self.get_inputs(UpperCamelCase__ )
__snake_case = pipe(**UpperCamelCase__ ).images[0]
__snake_case = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
__snake_case = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a : Any = logging.get_logger(__name__)
a : Optional[int] = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """van"""
def __init__( self : List[Any] , a_ : List[Any]=224 , a_ : Dict=3 , a_ : str=[7, 3, 3, 3] , a_ : Optional[int]=[4, 2, 2, 2] , a_ : Optional[int]=[64, 128, 320, 512] , a_ : Dict=[3, 3, 12, 3] , a_ : Optional[Any]=[8, 8, 4, 4] , a_ : Tuple="gelu" , a_ : Optional[Any]=0.02 , a_ : Union[str, Any]=1e-6 , a_ : List[Any]=1e-2 , a_ : Dict=0.0 , a_ : Any=0.0 , **a_ : Dict , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
__snake_case = image_size
__snake_case = num_channels
__snake_case = patch_sizes
__snake_case = strides
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = mlp_ratios
__snake_case = hidden_act
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = layer_scale_init_value
__snake_case = drop_path_rate
__snake_case = dropout_rate
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class SCREAMING_SNAKE_CASE__ ( _a ):
def __init__( self : List[Any] , a_ : List[Any] , a_ : Dict , a_ : Optional[Any] = None , a_ : Any = None , a_ : List[Any] = False , **a_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(features=_A , cache_dir=_A , keep_in_memory=_A , **_A )
__snake_case = Sql(
cache_dir=_A , features=_A , sql=_A , con=_A , **_A , )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = None
__snake_case = None
__snake_case = None
__snake_case = None
self.builder.download_and_prepare(
download_config=_A , download_mode=_A , verification_mode=_A , base_path=_A , )
# Build dataset for splits
__snake_case = self.builder.as_dataset(
split="train" , verification_mode=_A , in_memory=self.keep_in_memory )
return dataset
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[str] , a_ : Tuple , a_ : List[str] , a_ : int , a_ : List[str] = None , a_ : str = None , **a_ : Any , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
__snake_case = dataset
__snake_case = name
__snake_case = con
__snake_case = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__snake_case = num_proc
__snake_case = to_sql_kwargs
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.to_sql_kwargs.pop("sql" , _A )
__snake_case = self.to_sql_kwargs.pop("con" , _A )
__snake_case = self.to_sql_kwargs.pop("index" , _A )
__snake_case = self._write(index=_A , **self.to_sql_kwargs )
return written
def A ( self : str , a_ : Any ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = args
__snake_case = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
__snake_case = query_table(
table=self.dataset.data , key=slice(_A , offset + self.batch_size ) , indices=self.dataset._indices , )
__snake_case = batch.to_pandas()
__snake_case = df.to_sql(self.name , self.con , index=_A , **_A )
return num_rows or len(_A )
def A ( self : Union[str, Any] , a_ : Optional[Any] , **a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__snake_case , __snake_case = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , _A , _A )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
a : List[str] = 4
a : List[str] = 3
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> Any:
for shard in shards:
for i in range(UpperCAmelCase__ ):
yield {"i": i, "shard": shard}
def __UpperCAmelCase ( ) -> List[str]:
__snake_case = int(os.environ["RANK"] )
__snake_case = int(os.environ["WORLD_SIZE"] )
__snake_case = ArgumentParser()
parser.add_argument("--streaming" , type=UpperCAmelCase__ )
parser.add_argument("--local_rank" , type=UpperCAmelCase__ )
parser.add_argument("--num_workers" , type=UpperCAmelCase__ , default=0 )
__snake_case = parser.parse_args()
__snake_case = args.streaming
__snake_case = args.num_workers
__snake_case = {"shards": [F'''shard_{shard_idx}''' for shard_idx in range(UpperCAmelCase__ )]}
__snake_case = IterableDataset.from_generator(UpperCAmelCase__ , gen_kwargs=UpperCAmelCase__ )
if not streaming:
__snake_case = Dataset.from_list(list(UpperCAmelCase__ ) )
__snake_case = split_dataset_by_node(UpperCAmelCase__ , rank=UpperCAmelCase__ , world_size=UpperCAmelCase__ )
__snake_case = torch.utils.data.DataLoader(UpperCAmelCase__ , num_workers=UpperCAmelCase__ )
__snake_case = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__snake_case = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
__snake_case = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : int = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-audio"""
def __init__( self : Optional[Any] , a_ : Any=32 , a_ : List[Any]=768 , a_ : Dict=12 , a_ : Any=12 , a_ : str=3_072 , a_ : Dict="gelu" , a_ : List[str]=0.1 , a_ : Dict=0.1 , a_ : Optional[int]=0.1 , a_ : Any=0.0 , a_ : List[str]=0.1 , a_ : Any=0.1 , a_ : List[Any]=0.02 , a_ : List[str]=1e-5 , a_ : Optional[Any]="gelu" , a_ : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , a_ : Any=(5, 2, 2, 2, 2, 2, 2) , a_ : Any=(10, 3, 3, 3, 3, 2, 2) , a_ : List[Any]=False , a_ : Optional[int]=16 , a_ : int=19 , a_ : Union[str, Any]=5 , a_ : List[Any]=0.05 , a_ : Optional[Any]=10 , a_ : str=2 , a_ : str=0.0 , a_ : Dict=10 , a_ : Optional[Any]=0 , a_ : Optional[int]="sum" , a_ : Dict=False , a_ : Optional[int]=False , a_ : List[Any]=256 , a_ : str=(512, 512, 512, 512, 1_500) , a_ : Dict=(5, 3, 3, 1, 1) , a_ : List[Any]=(1, 2, 3, 1, 1) , a_ : Optional[int]=512 , a_ : Optional[int]=0 , a_ : Optional[int]=1 , a_ : str=2 , a_ : Tuple=False , a_ : int=3 , a_ : Any=2 , a_ : Union[str, Any]=3 , a_ : List[Any]=None , **a_ : Tuple , ):
"""simple docstring"""
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
__snake_case = hidden_size
__snake_case = feat_extract_activation
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = conv_bias
__snake_case = num_conv_pos_embeddings
__snake_case = num_conv_pos_embedding_groups
__snake_case = conv_pos_kernel_size
__snake_case = len(self.conv_dim )
__snake_case = num_hidden_layers
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = num_attention_heads
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = feat_proj_dropout
__snake_case = final_dropout
__snake_case = layerdrop
__snake_case = layer_norm_eps
__snake_case = initializer_range
__snake_case = vocab_size
__snake_case = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case = mask_time_prob
__snake_case = mask_time_length
__snake_case = mask_time_min_masks
__snake_case = mask_feature_prob
__snake_case = mask_feature_length
__snake_case = mask_feature_min_masks
# ctc loss
__snake_case = ctc_loss_reduction
__snake_case = ctc_zero_infinity
# adapter
__snake_case = add_adapter
__snake_case = adapter_kernel_size
__snake_case = adapter_stride
__snake_case = num_adapter_layers
__snake_case = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = list(a_ )
__snake_case = xvector_output_dim
@property
def A ( self : Any ):
"""simple docstring"""
return math.prod(self.conv_stride )
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" )
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] , a_ : Union[str, Any] = "" , a_ : Optional[int] = False ):
"""simple docstring"""
__snake_case = {}
# A node will be a leaf if the tree contains its word
__snake_case = is_leaf
__snake_case = prefix
def A ( self : str , a_ : Dict ):
"""simple docstring"""
__snake_case = 0
for q, w in zip(self.prefix , a_ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def A ( self : str , a_ : Dict ):
"""simple docstring"""
for word in words:
self.insert(a_ )
def A ( self : List[Any] , a_ : List[Any] ):
"""simple docstring"""
if self.prefix == word:
__snake_case = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__snake_case = RadixNode(prefix=a_ , is_leaf=a_ )
else:
__snake_case = self.nodes[word[0]]
__snake_case , __snake_case , __snake_case = incoming_node.match(
a_ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(a_ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__snake_case = remaining_prefix
__snake_case = self.nodes[matching_string[0]]
__snake_case = RadixNode(a_ , a_ )
__snake_case = aux_node
if remaining_word == "":
__snake_case = True
else:
self.nodes[matching_string[0]].insert(a_ )
def A ( self : List[Any] , a_ : int ):
"""simple docstring"""
__snake_case = self.nodes.get(word[0] , a_ )
if not incoming_node:
return False
else:
__snake_case , __snake_case , __snake_case = incoming_node.match(
a_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(a_ )
def A ( self : Optional[int] , a_ : int ):
"""simple docstring"""
__snake_case = self.nodes.get(word[0] , a_ )
if not incoming_node:
return False
else:
__snake_case , __snake_case , __snake_case = incoming_node.match(
a_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(a_ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__snake_case = list(self.nodes.values() )[0]
__snake_case = merging_node.is_leaf
self.prefix += merging_node.prefix
__snake_case = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__snake_case = False
# If there is 1 edge, we merge it with its child
else:
__snake_case = list(incoming_node.nodes.values() )[0]
__snake_case = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__snake_case = merging_node.nodes
return True
def A ( self : List[Any] , a_ : Tuple = 0 ):
"""simple docstring"""
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __UpperCAmelCase ( ) -> List[str]:
__snake_case = "banana bananas bandana band apple all beast".split()
__snake_case = RadixNode()
root.insert_many(_UpperCAmelCase )
assert all(root.find(_UpperCAmelCase ) for word in words )
assert not root.find("bandanas" )
assert not root.find("apps" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def __UpperCAmelCase ( ) -> Any:
assert test_trie()
def __UpperCAmelCase ( ) -> Any:
__snake_case = RadixNode()
__snake_case = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(_UpperCAmelCase )
print("Words:" , _UpperCAmelCase )
print("Tree:" )
root.print_tree()
if __name__ == "__main__":
main()
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
a : int = tuple[int, int, int]
a : Union[str, Any] = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
a : List[str] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
a : Optional[Any] = "EGZWVONAHDCLFQMSIPJBYUKXTR"
a : int = "FOBHMDKEXQNRAULPGSJVTYICZW"
a : List[str] = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
a : str = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
a : List[Any] = "RMDJXFUWGISLHVTCQNKYPBEZOA"
a : Tuple = "SGLCPQWZHKXAREONTFBVIYJUDM"
a : List[Any] = "HVSICLTYKQUBXDWAJZOMFGPREN"
a : List[Any] = "RZWQHFMVDBKICJLNTUXAGYPSOE"
a : str = "LFKIJODBEGAMQPXVUHYSTCZRWN"
a : List[str] = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def __UpperCAmelCase ( _UpperCAmelCase : RotorPositionT , _UpperCAmelCase : RotorSelectionT , _UpperCAmelCase : str ) -> int:
if (unique_rotsel := len(set(_UpperCAmelCase ) )) < 3:
__snake_case = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(_UpperCAmelCase )
# Checks if rotor positions are valid
__snake_case = rotpos
if not 0 < rotorposa <= len(_UpperCAmelCase ):
__snake_case = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
__snake_case = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
__snake_case = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(_UpperCAmelCase )
# Validates string and returns dict
__snake_case = _plugboard(_UpperCAmelCase )
return rotpos, rotsel, pbdict
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Any:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = F'''Plugboard setting isn\'t type string ({type(_UpperCAmelCase )})'''
raise TypeError(_UpperCAmelCase )
elif len(_UpperCAmelCase ) % 2 != 0:
__snake_case = F'''Odd number of symbols ({len(_UpperCAmelCase )})'''
raise Exception(_UpperCAmelCase )
elif pbstring == "":
return {}
pbstring.replace(" " , "" )
# Checks if all characters are unique
__snake_case = set()
for i in pbstring:
if i not in abc:
__snake_case = F'''\'{i}\' not in list of symbols'''
raise Exception(_UpperCAmelCase )
elif i in tmppbl:
__snake_case = F'''Duplicate symbol ({i})'''
raise Exception(_UpperCAmelCase )
else:
tmppbl.add(_UpperCAmelCase )
del tmppbl
# Created the dictionary
__snake_case = {}
for j in range(0 , len(_UpperCAmelCase ) - 1 , 2 ):
__snake_case = pbstring[j + 1]
__snake_case = pbstring[j]
return pb
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : RotorPositionT , _UpperCAmelCase : RotorSelectionT = (rotora, rotora, rotora) , _UpperCAmelCase : str = "" , ) -> List[Any]:
__snake_case = text.upper()
__snake_case = _validator(
_UpperCAmelCase , _UpperCAmelCase , plugb.upper() )
__snake_case = rotor_position
__snake_case = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
__snake_case = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
__snake_case = plugboard[symbol]
# rotor ra --------------------------
__snake_case = abc.index(_UpperCAmelCase ) + rotorposa
__snake_case = rotora[index % len(_UpperCAmelCase )]
# rotor rb --------------------------
__snake_case = abc.index(_UpperCAmelCase ) + rotorposa
__snake_case = rotora[index % len(_UpperCAmelCase )]
# rotor rc --------------------------
__snake_case = abc.index(_UpperCAmelCase ) + rotorposa
__snake_case = rotora[index % len(_UpperCAmelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
__snake_case = reflector[symbol]
# 2nd rotors
__snake_case = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
__snake_case = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
__snake_case = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
__snake_case = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
__snake_case = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
__snake_case = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
__snake_case = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
a : Dict = "This is my Python script that emulates the Enigma machine from WWII."
a : Dict = (1, 1, 1)
a : Any = "pictures"
a : int = (rotora, rotora, rotora)
a : Dict = enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
| 680 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
a : Dict = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
a : Any = requests.get(url, headers={'''UserAgent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
a : Dict = BeautifulSoup(res.text, '''html.parser''')
a : Optional[Any] = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(F'''https://google.com{link.get('href')}''')
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 680 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Dict:
if "model" in orig_key:
__snake_case = orig_key.replace("model." , "" )
if "norm1" in orig_key:
__snake_case = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
__snake_case = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
__snake_case = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
__snake_case = orig_key.split("." )[0].split("_" )[-1]
__snake_case = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
__snake_case = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
__snake_case = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
__snake_case = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
__snake_case = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
__snake_case = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
__snake_case = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
__snake_case = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
__snake_case = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
__snake_case = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
__snake_case = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
__snake_case = "yoso." + orig_key
return orig_key
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
__snake_case = orig_state_dict.pop(UpperCamelCase__ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
__snake_case = val
__snake_case = orig_state_dict["cls.predictions.decoder.bias"]
__snake_case = torch.arange(UpperCamelCase__ ).expand((1, -1) ) + 2
return orig_state_dict
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> Tuple:
__snake_case = torch.load(UpperCamelCase__ , map_location="cpu" )["model_state_dict"]
__snake_case = YosoConfig.from_json_file(UpperCamelCase__ )
__snake_case = YosoForMaskedLM(UpperCamelCase__ )
__snake_case = convert_checkpoint_helper(config.max_position_embeddings , UpperCamelCase__ )
print(model.load_state_dict(UpperCamelCase__ ) )
model.eval()
model.save_pretrained(UpperCamelCase__ )
print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : str = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
| 680 | 0 |
'''simple docstring'''
import re
def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> bool:
__snake_case = re.compile(
R"^(?:0|94|\+94|0{2}94)" R"7(0|1|2|4|5|6|7|8)" R"(-| |)" R"\d{7}$" )
return bool(re.search(lowerCamelCase__ , lowerCamelCase__ ) )
if __name__ == "__main__":
a : Any = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 0 |
'''simple docstring'''
import functools
from typing import Any
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : list[str] ) -> bool:
# Validation
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or len(_UpperCAmelCase ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not all(
isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
__snake_case = {}
__snake_case = "WORD_KEEPER"
for word in words:
__snake_case = trie
for c in word:
if c not in trie_node:
__snake_case = {}
__snake_case = trie_node[c]
__snake_case = True
__snake_case = len(_UpperCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(_UpperCAmelCase : int ) -> bool:
if index == len_string:
return True
__snake_case = trie
for i in range(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = trie_node.get(string[i] , _UpperCAmelCase )
if trie_node is None:
return False
if trie_node.get(_UpperCAmelCase , _UpperCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : Any = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """informer"""
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Optional[int] , a_ : Optional[int] = None , a_ : Optional[int] = None , a_ : str = "student_t" , a_ : str = "nll" , a_ : int = 1 , a_ : List[int] = None , a_ : Optional[Union[str, bool]] = "mean" , a_ : int = 0 , a_ : int = 0 , a_ : int = 0 , a_ : int = 0 , a_ : Optional[List[int]] = None , a_ : Optional[List[int]] = None , a_ : int = 64 , a_ : int = 32 , a_ : int = 32 , a_ : int = 2 , a_ : int = 2 , a_ : int = 2 , a_ : int = 2 , a_ : bool = True , a_ : str = "gelu" , a_ : float = 0.05 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : float = 0.1 , a_ : int = 100 , a_ : float = 0.02 , a_ : str=True , a_ : str = "prob" , a_ : int = 5 , a_ : bool = True , **a_ : int , ):
"""simple docstring"""
__snake_case = prediction_length
__snake_case = context_length or prediction_length
__snake_case = distribution_output
__snake_case = loss
__snake_case = input_size
__snake_case = num_time_features
__snake_case = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__snake_case = scaling
__snake_case = num_dynamic_real_features
__snake_case = num_static_real_features
__snake_case = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
__snake_case = cardinality
else:
__snake_case = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(a_ ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
__snake_case = embedding_dimension
else:
__snake_case = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__snake_case = num_parallel_samples
# Transformer architecture configuration
__snake_case = input_size * len(self.lags_sequence ) + self._number_of_features
__snake_case = d_model
__snake_case = encoder_attention_heads
__snake_case = decoder_attention_heads
__snake_case = encoder_ffn_dim
__snake_case = decoder_ffn_dim
__snake_case = encoder_layers
__snake_case = decoder_layers
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = activation_function
__snake_case = init_std
__snake_case = use_cache
# Informer
__snake_case = attention_type
__snake_case = sampling_factor
__snake_case = distil
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
| 680 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
a : int = logging.get_logger(__name__)
a : List[Any] = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Dict ) -> Optional[Any]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
__snake_case = TOKENIZER_CLASSES
else:
__snake_case = {tokenizer_name: getattr(_UpperCAmelCase , tokenizer_name + "Fast" )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
__snake_case = TOKENIZER_CLASSES[tokenizer_name]
__snake_case = True
if checkpoint_name is None:
__snake_case = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__snake_case = [checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
__snake_case = tokenizer_class.from_pretrained(_UpperCAmelCase , force_download=_UpperCAmelCase )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
__snake_case , __snake_case = checkpoint.split("/" )
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
elif add_prefix:
__snake_case = checkpoint
__snake_case = dump_path
else:
__snake_case = None
__snake_case = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__snake_case = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__snake_case = file_path.split(_UpperCAmelCase )[-1][0]
if next_char == "/":
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
__snake_case = tokenizer.save_pretrained(
_UpperCAmelCase , legacy_format=_UpperCAmelCase , filename_prefix=_UpperCAmelCase )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith("tokenizer.json" ):
os.remove(_UpperCAmelCase )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
a : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 703 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : str = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def __UpperCAmelCase ( _UpperCAmelCase : str = "" ) -> bool:
if len(_UpperCAmelCase ) == 0:
return True
__snake_case = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__snake_case = {}
for character in lower_case_input_str:
__snake_case = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
__snake_case = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __UpperCAmelCase ( _UpperCAmelCase : str = "" ) -> None:
print("\nFor string = " , _UpperCAmelCase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
a : Dict = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
a : Tuple = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {'' if status else 'not '}be rearranged as a palindrome''')
| 704 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if number > 0:
raise ValueError("input must be a negative integer" )
__snake_case = len(bin(_UpperCAmelCase )[3:] )
__snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
__snake_case = (
(
"1"
+ "0" * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = TextToVideoSDPipeline
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__SCREAMING_SNAKE_CASE = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def A ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
__snake_case = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a_ , set_alpha_to_one=a_ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
__snake_case = CLIPTextModel(a_ )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__snake_case = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A ( self : Union[str, Any] , a_ : str , a_ : Optional[int]=0 ):
"""simple docstring"""
if str(a_ ).startswith("mps" ):
__snake_case = torch.manual_seed(a_ )
else:
__snake_case = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A ( self : Any ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = TextToVideoSDPipeline(**a_ )
__snake_case = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_dummy_inputs(a_ )
__snake_case = "np"
__snake_case = sd_pipe(**a_ ).frames
__snake_case = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
__snake_case = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : str ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=a_ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A ( self : Tuple ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a_ , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A ( self : int ):
"""simple docstring"""
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : List[Any] ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
__snake_case = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
__snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__snake_case = pipe.to("cuda" )
__snake_case = "Spiderman is surfing"
__snake_case = torch.Generator(device="cpu" ).manual_seed(0 )
__snake_case = pipe(a_ , generator=a_ , num_inference_steps=25 , output_type="pt" ).frames
__snake_case = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
__snake_case = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
__snake_case = pipe.to("cuda" )
__snake_case = "Spiderman is surfing"
__snake_case = torch.Generator(device="cpu" ).manual_seed(0 )
__snake_case = pipe(a_ , generator=a_ , num_inference_steps=2 , output_type="pt" ).frames
__snake_case = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 705 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
__snake_case = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __UpperCAmelCase ( _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : Optional[int]=10 , _UpperCAmelCase : List[str]=1_00 , _UpperCAmelCase : Optional[int]=10_26 , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict="data/tokenized_stories_train_wikitext103.jbl" , _UpperCAmelCase : Optional[Any]="igf_context_pairs.jbl" , ) -> Any:
set_seed(3 )
# generate train_data and objective_set
__snake_case , __snake_case = generate_datasets(
_UpperCAmelCase , _UpperCAmelCase , number=_UpperCAmelCase , min_len=10_26 , trim=_UpperCAmelCase )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__snake_case = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
__snake_case = load_gpta("gpt2" ).to(_UpperCAmelCase )
print("computing perplexity on objective set" )
__snake_case = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).item()
print("perplexity on objective set:" , _UpperCAmelCase )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any=15 , _UpperCAmelCase : Union[str, Any]=1_28 , _UpperCAmelCase : List[str]=1_00 , _UpperCAmelCase : Optional[Any]="igf_model.pt" , ) -> Any:
set_seed(42 )
# Load pre-trained model
__snake_case = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
__snake_case = SecondaryLearner(_UpperCAmelCase )
# Train secondary learner
__snake_case = train_secondary_learner(
_UpperCAmelCase , _UpperCAmelCase , max_epochs=_UpperCAmelCase , batch_size=_UpperCAmelCase , eval_freq=1_00 , igf_model_path=_UpperCAmelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any]=32 , _UpperCAmelCase : List[str]=10_00 , _UpperCAmelCase : List[str]=16 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Optional[int]=recopy_gpta , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : int=10 , _UpperCAmelCase : str="gpt2_finetuned.pt" , ) -> int:
__snake_case = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
__snake_case = RandomSampler(_UpperCAmelCase )
__snake_case = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase )
__snake_case = max_steps // (len(_UpperCAmelCase )) + 1
__snake_case = 0
__snake_case = torch.zeros((1, context_len) , dtype=torch.long , device=_UpperCAmelCase )
__snake_case , __snake_case , __snake_case = recopy_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
model.train()
if secondary_learner is not None:
secondary_learner.to(_UpperCAmelCase )
secondary_learner.eval()
__snake_case = []
__snake_case = 0
__snake_case = []
__snake_case = []
# Compute the performance of the transformer model at the beginning
__snake_case = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
test_perps.append(_UpperCAmelCase )
print("Test perplexity, step" , _UpperCAmelCase , ":" , _UpperCAmelCase )
for epoch in range(int(_UpperCAmelCase ) ):
for step, example in enumerate(_UpperCAmelCase ):
torch.cuda.empty_cache()
__snake_case = random.randint(0 , example.size(2 ) - context_len - 1 )
__snake_case = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__snake_case = model(_UpperCAmelCase , labels=_UpperCAmelCase )
__snake_case = True
if secondary_learner is not None:
__snake_case = secondary_learner.forward(
torch.tensor(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_UpperCAmelCase ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__snake_case = -1
if predicted_q < threshold:
__snake_case = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__snake_case = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__snake_case = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__snake_case = compute_perplexity(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
test_perps.append(_UpperCAmelCase )
print("Test perplexity, step" , _UpperCAmelCase , ":" , _UpperCAmelCase )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _UpperCAmelCase )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __UpperCAmelCase ( ) -> int:
__snake_case = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_00 , type=_UpperCAmelCase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_00 , type=_UpperCAmelCase , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=10_00 , type=_UpperCAmelCase , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_28 , type=_UpperCAmelCase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=_UpperCAmelCase , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=_UpperCAmelCase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_00 , type=_UpperCAmelCase , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=10_26 , type=_UpperCAmelCase , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=_UpperCAmelCase , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=_UpperCAmelCase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_UpperCAmelCase , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_UpperCAmelCase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
__snake_case = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
__snake_case = training_secondary_learner(
_UpperCAmelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
__snake_case = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
__snake_case , __snake_case = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_00 , min_len=10_26 , trim=_UpperCAmelCase )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_UpperCAmelCase , secondary_learner=_UpperCAmelCase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
a : List[str] = False
a : Optional[int] = False
def __UpperCAmelCase ( _UpperCAmelCase : Namespace ) -> Union[str, Any]:
return TrainCommand(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@staticmethod
def A ( a_ : ArgumentParser ):
"""simple docstring"""
__snake_case = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=a_ , required=a_ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=a_ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=a_ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=a_ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=a_ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=a_ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=a_ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=a_ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=a_ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=a_ , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=a_ , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=a_ , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=a_ , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=a_ )
def __init__( self : Union[str, Any] , a_ : Namespace ):
"""simple docstring"""
__snake_case = logging.get_logger("transformers-cli/training" )
__snake_case = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=a_ )
__snake_case = args.output
__snake_case = args.column_label
__snake_case = args.column_text
__snake_case = args.column_id
self.logger.info(f'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
__snake_case = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'''Loading dataset from {args.train_data}''' )
__snake_case = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = None
if args.validation_data:
self.logger.info(f'''Loading validation dataset from {args.validation_data}''' )
__snake_case = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__snake_case = args.validation_split
__snake_case = args.train_batch_size
__snake_case = args.valid_batch_size
__snake_case = args.learning_rate
__snake_case = args.adam_epsilon
def A ( self : int ):
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def A ( self : Dict ):
"""simple docstring"""
raise NotImplementedError
def A ( self : Optional[int] ):
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 680 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : str = '''▁'''
a : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a : Optional[Any] = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
a : List[str] = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1_024,
}
# fmt: off
a : List[str] = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI''']
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
def __init__( self : Optional[int] , a_ : Any , a_ : int=None , a_ : List[str]=None , a_ : str="</s>" , a_ : Dict="</s>" , a_ : Optional[Any]="<s>" , a_ : Union[str, Any]="<unk>" , a_ : Tuple="<pad>" , a_ : str="<mask>" , a_ : Optional[Dict[str, Any]] = None , **a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
__snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
__snake_case = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=a_ , tgt_lang=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , cls_token=a_ , pad_token=a_ , mask_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a_ ) )
__snake_case = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__snake_case = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__snake_case = 1
__snake_case = len(self.sp_model )
__snake_case = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a_ )
}
__snake_case = {v: k for k, v in self.lang_code_to_id.items()}
__snake_case = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__snake_case = src_lang if src_lang is not None else "en_XX"
__snake_case = self.lang_code_to_id[self._src_lang]
__snake_case = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def A ( self : Dict ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def A ( self : List[Any] , a_ : str ):
"""simple docstring"""
__snake_case = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.__dict__.copy()
__snake_case = None
return state
def __setstate__( self : Optional[int] , a_ : Dict ):
"""simple docstring"""
__snake_case = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case = {}
__snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self : Any ):
"""simple docstring"""
__snake_case = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : int , a_ : str ):
"""simple docstring"""
return self.sp_model.encode(a_ , out_type=a_ )
def A ( self : List[str] , a_ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__snake_case = self.sp_model.PieceToId(a_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A ( self : Dict , a_ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A ( self : str , a_ : Any ):
"""simple docstring"""
__snake_case = []
__snake_case = ""
__snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a_ ) + token
__snake_case = True
__snake_case = []
else:
current_sub_tokens.append(a_ )
__snake_case = False
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def A ( self : Dict , a_ : str , a_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(a_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__snake_case = os.path.join(
a_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , "wb" ) as fi:
__snake_case = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
def A ( self : Dict , a_ : List[int] , a_ : Optional[List[int]] = None , a_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
__snake_case = [1] * len(self.prefix_tokens )
__snake_case = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a_ )) + suffix_ones
return prefix_ones + ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A ( self : Union[str, Any] , a_ : Tuple , a_ : str , a_ : Optional[str] , a_ : Optional[str] , **a_ : str ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
__snake_case = src_lang
__snake_case = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case = self.convert_tokens_to_ids(a_ )
__snake_case = tgt_lang_id
return inputs
def A ( self : Optional[Any] , a_ : List[str] , a_ : str = "en_XX" , a_ : Optional[List[str]] = None , a_ : str = "ro_RO" , **a_ : List[Any] , ):
"""simple docstring"""
__snake_case = src_lang
__snake_case = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def A ( self : Optional[Any] ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def A ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A ( self : Optional[Any] , a_ : str ):
"""simple docstring"""
__snake_case = self.lang_code_to_id[src_lang]
__snake_case = [self.cur_lang_code_id]
__snake_case = [self.eos_token_id]
def A ( self : int , a_ : str ):
"""simple docstring"""
__snake_case = self.lang_code_to_id[tgt_lang]
__snake_case = [self.cur_lang_code_id]
__snake_case = [self.eos_token_id]
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@property
def A ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def A ( self : int ):
"""simple docstring"""
__snake_case = self.dummy_uncond_unet
__snake_case = PNDMScheduler()
__snake_case = PNDMPipeline(unet=a_ , scheduler=a_ )
pndm.to(a_ )
pndm.set_progress_bar_config(disable=a_ )
__snake_case = torch.manual_seed(0 )
__snake_case = pndm(generator=a_ , num_inference_steps=20 , output_type="numpy" ).images
__snake_case = torch.manual_seed(0 )
__snake_case = pndm(generator=a_ , num_inference_steps=20 , output_type="numpy" , return_dict=a_ )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = "google/ddpm-cifar10-32"
__snake_case = UNetaDModel.from_pretrained(a_ )
__snake_case = PNDMScheduler()
__snake_case = PNDMPipeline(unet=a_ , scheduler=a_ )
pndm.to(a_ )
pndm.set_progress_bar_config(disable=a_ )
__snake_case = torch.manual_seed(0 )
__snake_case = pndm(generator=a_ , output_type="numpy" ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__snake_case = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
__snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = JukeboxTokenizer
__SCREAMING_SNAKE_CASE = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
def A ( self : List[Any] ):
"""simple docstring"""
import torch
__snake_case = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" )
__snake_case = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def A ( self : Any ):
"""simple docstring"""
import torch
__snake_case = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" )
__snake_case = tokenizer(**self.metas )["input_ids"]
# fmt: off
__snake_case = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
a : Any = ''''''
a : List[str] = ''''''
a : List[Any] = ''''''
a : List[Any] = 1 # (0 is vertical, 1 is horizontal)
def __UpperCAmelCase ( ) -> None:
__snake_case , __snake_case = get_dataset(_UpperCAmelCase , _UpperCAmelCase )
print("Processing..." )
__snake_case , __snake_case , __snake_case = update_image_and_anno(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for index, image in enumerate(_UpperCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__snake_case = random_chars(32 )
__snake_case = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
__snake_case = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(F'''/{file_root}.jpg''' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(_UpperCAmelCase )} with {file_name}''' )
__snake_case = []
for anno in new_annos[index]:
__snake_case = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(_UpperCAmelCase )
with open(F'''/{file_root}.txt''' , "w" ) as outfile:
outfile.write("\n".join(line for line in annos_list ) )
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> tuple[list, list]:
__snake_case = []
__snake_case = []
for label_file in glob.glob(os.path.join(_UpperCAmelCase , "*.txt" ) ):
__snake_case = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
with open(_UpperCAmelCase ) as in_file:
__snake_case = in_file.readlines()
__snake_case = os.path.join(_UpperCAmelCase , F'''{label_name}.jpg''' )
__snake_case = []
for obj_list in obj_lists:
__snake_case = obj_list.rstrip("\n" ).split(" " )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(_UpperCAmelCase )
labels.append(_UpperCAmelCase )
return img_paths, labels
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int = 1 ) -> tuple[list, list, list]:
__snake_case = []
__snake_case = []
__snake_case = []
for idx in range(len(_UpperCAmelCase ) ):
__snake_case = []
__snake_case = img_list[idx]
path_list.append(_UpperCAmelCase )
__snake_case = anno_list[idx]
__snake_case = cva.imread(_UpperCAmelCase )
if flip_type == 1:
__snake_case = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__snake_case = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__snake_case = cva.flip(_UpperCAmelCase , _UpperCAmelCase )
for bbox in img_annos:
__snake_case = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(_UpperCAmelCase )
new_imgs_list.append(_UpperCAmelCase )
return new_imgs_list, new_annos_lists, path_list
def __UpperCAmelCase ( _UpperCAmelCase : int = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
__snake_case = ascii_lowercase + digits
return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> Union[str, Any]:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = np.full((len(_UpperCAmelCase ), sequence_length, 2) , _UpperCAmelCase )
else:
__snake_case = np.full((len(_UpperCAmelCase ), sequence_length) , _UpperCAmelCase )
for i, tensor in enumerate(_UpperCAmelCase ):
if padding_side == "right":
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = tensor[:sequence_length]
else:
__snake_case = tensor[:sequence_length]
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = tensor[:sequence_length]
else:
__snake_case = tensor[:sequence_length]
return out_tensor.tolist()
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> Union[str, Any]:
__snake_case = ord(_UpperCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
__snake_case = unicodedata.category(_UpperCAmelCase )
if cat.startswith("P" ):
return True
return False
@dataclass
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = -100
__SCREAMING_SNAKE_CASE = """pt"""
def A ( self : List[Any] , a_ : str ):
"""simple docstring"""
import torch
__snake_case = "label" if "label" in features[0].keys() else "labels"
__snake_case = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
__snake_case = self.tokenizer.pad(
a_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
__snake_case = torch.tensor(batch["entity_ids"] ).shape[1]
__snake_case = self.tokenizer.padding_side
if padding_side == "right":
__snake_case = [
list(a_ ) + [self.label_pad_token_id] * (sequence_length - len(a_ )) for label in labels
]
else:
__snake_case = [
[self.label_pad_token_id] * (sequence_length - len(a_ )) + list(a_ ) for label in labels
]
__snake_case = [feature["ner_tags"] for feature in features]
__snake_case = padding_tensor(a_ , -1 , a_ , a_ )
__snake_case = [feature["original_entity_spans"] for feature in features]
__snake_case = padding_tensor(a_ , (-1, -1) , a_ , a_ )
__snake_case = {k: torch.tensor(a_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> np.ndarray:
__snake_case = int(np.ceil((x_end - xa) / step_size ) )
__snake_case = np.zeros((n + 1,) )
__snake_case = ya
__snake_case = xa
for k in range(_UpperCAmelCase ):
__snake_case = y[k] + step_size * ode_func(_UpperCAmelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> int:
__snake_case = hf_hub_url(repo_id=_UpperCAmelCase , path=_UpperCAmelCase , revision=_UpperCAmelCase )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_UpperCAmelCase )}'''
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = None
def A__ ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict=0.999 , _UpperCAmelCase : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase : List[Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase : Dict ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__snake_case = []
for i in range(_UpperCAmelCase ):
__snake_case = i / num_diffusion_timesteps
__snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(_UpperCAmelCase ) / alpha_bar_fn(_UpperCAmelCase ) , _UpperCAmelCase ) )
return torch.tensor(_UpperCAmelCase , dtype=torch.floataa )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = 1
@register_to_config
def __init__( self : int , a_ : int = 1_000 , a_ : float = 0.0001 , a_ : float = 0.02 , a_ : str = "linear" , a_ : Optional[Union[np.ndarray, List[float]]] = None , a_ : bool = True , a_ : bool = True , a_ : int = 0 , a_ : str = "epsilon" , a_ : float = 1.0 , **a_ : Any , ):
"""simple docstring"""
if kwargs.get("set_alpha_to_one" , a_ ) is not None:
__snake_case = (
"The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
)
deprecate("set_alpha_to_one" , "1.0.0" , a_ , standard_warn=a_ )
__snake_case = kwargs["set_alpha_to_one"]
if trained_betas is not None:
__snake_case = torch.tensor(a_ , dtype=torch.floataa )
elif beta_schedule == "linear":
__snake_case = torch.linspace(a_ , a_ , a_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__snake_case = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , a_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__snake_case = betas_for_alpha_bar(a_ )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__snake_case = 1.0 - self.betas
__snake_case = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
__snake_case = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
__snake_case = 1.0
# setable values
__snake_case = None
__snake_case = torch.from_numpy(np.arange(0 , a_ ).copy().astype(np.intaa ) )
def A ( self : str , a_ : torch.FloatTensor , a_ : Optional[int] = None ):
"""simple docstring"""
return sample
def A ( self : Optional[Any] , a_ : int , a_ : Union[str, torch.device] = None ):
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
__snake_case = num_inference_steps
__snake_case = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__snake_case = (np.arange(0 , a_ ) * step_ratio).round().copy().astype(np.intaa )
__snake_case = torch.from_numpy(a_ ).to(a_ )
self.timesteps += self.config.steps_offset
def A ( self : str , a_ : torch.FloatTensor , a_ : int , a_ : torch.FloatTensor , a_ : float = 0.0 , a_ : bool = False , a_ : Optional[torch.FloatTensor] = None , a_ : bool = True , ):
"""simple docstring"""
__snake_case = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
__snake_case = self.alphas_cumprod[timestep]
__snake_case = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
__snake_case = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
__snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
__snake_case = model_output
elif self.config.prediction_type == "sample":
__snake_case = model_output
__snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
__snake_case = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
__snake_case = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
" `v_prediction`" )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
__snake_case = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=a_ , pred_original_sample=a_ )
def __len__( self : Optional[Any] ):
"""simple docstring"""
return self.config.num_train_timesteps
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" )
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
a : List[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return 12
@property
def A ( self : List[Any] ):
"""simple docstring"""
return 12
@property
def A ( self : Tuple ):
"""simple docstring"""
return 32
@property
def A ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def A ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(a_ )
@property
def A ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = 12
__snake_case = 12
__snake_case = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
__snake_case = TransformeraDModel(**a_ )
return model
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = "cpu"
__snake_case = self.dummy_vqvae
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_transformer
__snake_case = VQDiffusionScheduler(self.num_embed )
__snake_case = LearnedClassifierFreeSamplingEmbeddings(learnable=a_ )
__snake_case = VQDiffusionPipeline(
vqvae=a_ , text_encoder=a_ , tokenizer=a_ , transformer=a_ , scheduler=a_ , learned_classifier_free_sampling_embeddings=a_ , )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "teddy bear playing in the pool"
__snake_case = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case = pipe([prompt] , generator=a_ , num_inference_steps=2 , output_type="np" )
__snake_case = output.images
__snake_case = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case = pipe(
[prompt] , generator=a_ , output_type="np" , return_dict=a_ , num_inference_steps=2 )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__snake_case = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = "cpu"
__snake_case = self.dummy_vqvae
__snake_case = self.dummy_text_encoder
__snake_case = self.dummy_tokenizer
__snake_case = self.dummy_transformer
__snake_case = VQDiffusionScheduler(self.num_embed )
__snake_case = LearnedClassifierFreeSamplingEmbeddings(
learnable=a_ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__snake_case = VQDiffusionPipeline(
vqvae=a_ , text_encoder=a_ , tokenizer=a_ , transformer=a_ , scheduler=a_ , learned_classifier_free_sampling_embeddings=a_ , )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "teddy bear playing in the pool"
__snake_case = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case = pipe([prompt] , generator=a_ , num_inference_steps=2 , output_type="np" )
__snake_case = output.images
__snake_case = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case = pipe(
[prompt] , generator=a_ , output_type="np" , return_dict=a_ , num_inference_steps=2 )[0]
__snake_case = image[0, -3:, -3:, -1]
__snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__snake_case = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
__snake_case = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
__snake_case = pipeline.to(a_ )
pipeline.set_progress_bar_config(disable=a_ )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__snake_case = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a_ , output_type="np" , )
__snake_case = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
| 680 | 0 |
'''simple docstring'''
from math import factorial
def __UpperCAmelCase ( _UpperCAmelCase : int = 1_00 ) -> int:
return sum(int(_UpperCAmelCase ) for x in str(factorial(_UpperCAmelCase ) ) )
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """dandelin/vilt-b32-finetuned-vqa"""
__SCREAMING_SNAKE_CASE = (
"""This is a tool that answers a question about an image. It takes an input named `image` which should be the """
"""image containing the information, as well as a `question` which should be the question in English. It """
"""returns a text that is the answer to the question."""
)
__SCREAMING_SNAKE_CASE = """image_qa"""
__SCREAMING_SNAKE_CASE = AutoProcessor
__SCREAMING_SNAKE_CASE = AutoModelForVisualQuestionAnswering
__SCREAMING_SNAKE_CASE = ["""image""", """text"""]
__SCREAMING_SNAKE_CASE = ["""text"""]
def __init__( self : str , *a_ : Dict , **a_ : Dict ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*a_ , **a_ )
def A ( self : Tuple , a_ : "Image" , a_ : str ):
"""simple docstring"""
return self.pre_processor(a_ , a_ , return_tensors="pt" )
def A ( self : Dict , a_ : Union[str, Any] ):
"""simple docstring"""
with torch.no_grad():
return self.model(**a_ ).logits
def A ( self : List[Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 680 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
| 680 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
a : Optional[Any] = 8
def __UpperCAmelCase ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=BITS ) -> str:
__snake_case = x.device
__snake_case = (x * 2_55).int().clamp(0 , 2_55 )
__snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase )
__snake_case = rearrange(_UpperCAmelCase , "d -> d 1 1" )
__snake_case = rearrange(_UpperCAmelCase , "b c h w -> b c 1 h w" )
__snake_case = ((x & mask) != 0).float()
__snake_case = rearrange(_UpperCAmelCase , "b c d h w -> b (c d) h w" )
__snake_case = bits * 2 - 1
return bits
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=BITS ) -> Optional[Any]:
__snake_case = x.device
__snake_case = (x > 0).int()
__snake_case = 2 ** torch.arange(bits - 1 , -1 , -1 , device=_UpperCAmelCase , dtype=torch.intaa )
__snake_case = rearrange(_UpperCAmelCase , "d -> d 1 1" )
__snake_case = rearrange(_UpperCAmelCase , "b (c d) h w -> b c d h w" , d=8 )
__snake_case = reduce(x * mask , "b c d h w -> b c h w" , "sum" )
return (dec / 2_55).clamp(0.0 , 1.0 )
def __UpperCAmelCase ( self : Union[str, Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
__snake_case = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
__snake_case = self.alphas_cumprod[timestep]
__snake_case = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
__snake_case = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
__snake_case = self.bit_scale
if self.config.clip_sample:
__snake_case = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
__snake_case = self._get_variance(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
__snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
__snake_case = model_output.device if torch.is_tensor(_UpperCAmelCase ) else "cpu"
__snake_case = torch.randn(model_output.shape , dtype=model_output.dtype , generator=_UpperCAmelCase ).to(_UpperCAmelCase )
__snake_case = self._get_variance(_UpperCAmelCase , _UpperCAmelCase ) ** 0.5 * eta * noise
__snake_case = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
def __UpperCAmelCase ( self : Union[str, Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : Dict="epsilon" , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : bool = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
__snake_case = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__snake_case , __snake_case = torch.split(_UpperCAmelCase , sample.shape[1] , dim=1 )
else:
__snake_case = None
# 1. compute alphas, betas
__snake_case = self.alphas_cumprod[t]
__snake_case = self.alphas_cumprod[t - 1] if t > 0 else self.one
__snake_case = 1 - alpha_prod_t
__snake_case = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__snake_case = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
__snake_case = self.bit_scale
if self.config.clip_sample:
__snake_case = torch.clamp(_UpperCAmelCase , -scale , _UpperCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__snake_case = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__snake_case = 0
if t > 0:
__snake_case = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_UpperCAmelCase ).to(model_output.device )
__snake_case = (self._get_variance(_UpperCAmelCase , predicted_variance=_UpperCAmelCase ) ** 0.5) * noise
__snake_case = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_UpperCAmelCase , pred_original_sample=_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Optional[Any] , a_ : UNetaDConditionModel , a_ : Union[DDIMScheduler, DDPMScheduler] , a_ : Optional[float] = 1.0 , ):
"""simple docstring"""
super().__init__()
__snake_case = bit_scale
__snake_case = (
ddim_bit_scheduler_step if isinstance(a_ , a_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self : Dict , a_ : Optional[int] = 256 , a_ : Optional[int] = 256 , a_ : Optional[int] = 50 , a_ : Optional[torch.Generator] = None , a_ : Optional[int] = 1 , a_ : Optional[str] = "pil" , a_ : bool = True , **a_ : int , ):
"""simple docstring"""
__snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=a_ , )
__snake_case = decimal_to_bits(a_ ) * self.bit_scale
__snake_case = latents.to(self.device )
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
__snake_case = self.unet(a_ , a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(a_ , a_ , a_ ).prev_sample
__snake_case = bits_to_decimal(a_ )
if output_type == "pil":
__snake_case = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : list[str] | None = None , _UpperCAmelCase : dict[str, float] | None = None , _UpperCAmelCase : bool = False , ) -> tuple[int, float, str]:
__snake_case = cipher_alphabet or [chr(_UpperCAmelCase ) for i in range(97 , 1_23 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
__snake_case = {
"a": 0.0_8497,
"b": 0.0_1492,
"c": 0.0_2202,
"d": 0.0_4253,
"e": 0.1_1162,
"f": 0.0_2228,
"g": 0.0_2015,
"h": 0.0_6094,
"i": 0.0_7546,
"j": 0.0_0153,
"k": 0.0_1292,
"l": 0.0_4025,
"m": 0.0_2406,
"n": 0.0_6749,
"o": 0.0_7507,
"p": 0.0_1929,
"q": 0.0_0095,
"r": 0.0_7587,
"s": 0.0_6327,
"t": 0.0_9356,
"u": 0.0_2758,
"v": 0.0_0978,
"w": 0.0_2560,
"x": 0.0_0150,
"y": 0.0_1994,
"z": 0.0_0077,
}
else:
# Custom frequencies dictionary
__snake_case = frequencies_dict
if not case_sensitive:
__snake_case = ciphertext.lower()
# Chi squared statistic values
__snake_case = {}
# cycle through all of the shifts
for shift in range(len(_UpperCAmelCase ) ):
__snake_case = ""
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
__snake_case = (alphabet_letters.index(letter.lower() ) - shift) % len(
_UpperCAmelCase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
__snake_case = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
__snake_case = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
__snake_case = decrypted_with_shift.lower().count(_UpperCAmelCase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__snake_case = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__snake_case = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
__snake_case = decrypted_with_shift.count(_UpperCAmelCase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
__snake_case = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
__snake_case = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
__snake_case = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_UpperCAmelCase : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
__snake_case = min(
_UpperCAmelCase , key=_UpperCAmelCase , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
__snake_case
) , (
__snake_case
) ,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Dict:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__( self : List[Any] , a_ : nn.Module , a_ : int ):
"""simple docstring"""
super().__init__()
__snake_case = module
__snake_case = nn.Sequential(
nn.Linear(module.in_features , a_ , bias=a_ ) , nn.Linear(a_ , module.out_features , bias=a_ ) , )
__snake_case = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=a_ )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def A ( self : Optional[int] , a_ : Optional[Any] , *a_ : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.module(a_ , *a_ , **a_ ) + self.adapter(a_ )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
__SCREAMING_SNAKE_CASE = """bigscience/bloom-1b7"""
# Constant values
__SCREAMING_SNAKE_CASE = 2.109_6595_5269_2574
__SCREAMING_SNAKE_CASE = """Hello my name is"""
__SCREAMING_SNAKE_CASE = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
__SCREAMING_SNAKE_CASE = 10
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = AutoTokenizer.from_pretrained(self.model_name )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : str ):
"""simple docstring"""
super().setUp()
# Models and tokenizer
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
def A ( self : str ):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.model_abit.config
self.assertTrue(hasattr(a_ , "quantization_config" ) )
__snake_case = config.to_dict()
__snake_case = config.to_diff_dict()
__snake_case = config.to_json_string()
def A ( self : Tuple ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__snake_case = self.model_fpaa.get_memory_footprint()
__snake_case = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__snake_case = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def A ( self : Optional[int] ):
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(a_ , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = BitsAndBytesConfig()
__snake_case = True
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
def A ( self : str ):
"""simple docstring"""
with self.assertRaises(a_ ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = BitsAndBytesConfig()
with self.assertRaises(a_ ):
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=a_ , load_in_abit=a_ , device_map="auto" , bnb_abit_quant_type="nf4" , )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(a_ ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(a_ ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(a_ ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(a_ ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
__snake_case = self.model_fpaa.to(torch.floataa )
__snake_case = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__snake_case = self.model_fpaa.to("cpu" )
# Check this does not throw an error
__snake_case = self.model_fpaa.half()
# Check this does not throw an error
__snake_case = self.model_fpaa.float()
def A ( self : str ):
"""simple docstring"""
__snake_case = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=a_ , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@classmethod
def A ( cls : Union[str, Any] ):
"""simple docstring"""
__snake_case = "t5-small"
__snake_case = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
__snake_case = AutoTokenizer.from_pretrained(cls.model_name )
__snake_case = "Translate in German: Hello, my dog is cute"
def A ( self : Optional[Any] ):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ):
"""simple docstring"""
from transformers import TaForConditionalGeneration
__snake_case = TaForConditionalGeneration._keep_in_fpaa_modules
__snake_case = None
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
__snake_case = modules
def A ( self : str ):
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
# test with `flan-t5-small`
__snake_case = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=a_ , device_map="auto" )
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
__snake_case = model.generate(**a_ )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : str ):
"""simple docstring"""
super().setUp()
# model_name
__snake_case = "bigscience/bloom-560m"
__snake_case = "t5-small"
# Different types of model
__snake_case = AutoModel.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
# Sequence classification model
__snake_case = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=a_ , device_map="auto" )
# CausalLM model
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ , device_map="auto" )
# Seq2seq model
__snake_case = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=a_ , device_map="auto" )
def A ( self : List[str] ):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A ( self : int ):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : str ):
"""simple docstring"""
super().setUp()
def A ( self : Dict ):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A ( self : str ):
"""simple docstring"""
__snake_case = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__snake_case = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
def A ( self : Dict ):
"""simple docstring"""
__snake_case = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=a_ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__snake_case = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
__snake_case = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=a_ ) , self.EXPECTED_OUTPUTS )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = "facebook/opt-350m"
super().setUp()
def A ( self : str ):
"""simple docstring"""
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
__snake_case = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=a_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__snake_case = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__snake_case = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(a_ ) ):
__snake_case = LoRALayer(module.q_proj , rank=16 )
__snake_case = LoRALayer(module.k_proj , rank=16 )
__snake_case = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__snake_case = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__snake_case = model.forward(**a_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(a_ , a_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(a_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """gpt2-xl"""
__SCREAMING_SNAKE_CASE = 3.3191_8548_5415_2187
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> str:
__snake_case = int(_UpperCAmelCase )
assert noofclusters < len(_UpperCAmelCase )
# Find out the dimensionality
__snake_case = len(vectors[0] )
# Will help select random centroids from among the available vectors
__snake_case = list(range(len(_UpperCAmelCase ) ) )
shuffle(_UpperCAmelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__snake_case = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__snake_case = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__snake_case = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_UpperCAmelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
__snake_case = tf.placeholder("float64" , [dim] )
__snake_case = []
for centroid in centroids:
cent_assigns.append(tf.assign(_UpperCAmelCase , _UpperCAmelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__snake_case = [tf.Variable(0 ) for i in range(len(_UpperCAmelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__snake_case = tf.placeholder("int32" )
__snake_case = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_UpperCAmelCase , _UpperCAmelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__snake_case = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__snake_case = tf.reduce_mean(_UpperCAmelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__snake_case = tf.placeholder("float" , [dim] )
__snake_case = tf.placeholder("float" , [dim] )
__snake_case = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_UpperCAmelCase , _UpperCAmelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__snake_case = tf.placeholder("float" , [noofclusters] )
__snake_case = tf.argmin(_UpperCAmelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__snake_case = tf.initialize_all_variables()
# Initialize all variables
sess.run(_UpperCAmelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__snake_case = 1_00
for _ in range(_UpperCAmelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_UpperCAmelCase ) ):
__snake_case = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__snake_case = [
sess.run(_UpperCAmelCase , feed_dict={va: vect, va: sess.run(_UpperCAmelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__snake_case = sess.run(
_UpperCAmelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_UpperCAmelCase ):
# Collect all the vectors assigned to this cluster
__snake_case = [
vectors[i]
for i in range(len(_UpperCAmelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__snake_case = sess.run(
_UpperCAmelCase , feed_dict={mean_input: array(_UpperCAmelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__snake_case = sess.run(_UpperCAmelCase )
__snake_case = sess.run(_UpperCAmelCase )
return centroids, assignments
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
| 680 | 0 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
a : str = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Tuple , a_ : int = 14 ):
"""simple docstring"""
if group not in primes:
raise ValueError("Unsupported Group" )
__snake_case = primes[group]["prime"]
__snake_case = primes[group]["generator"]
__snake_case = int(hexlify(urandom(32 ) ) , base=16 )
def A ( self : Dict ):
"""simple docstring"""
return hex(self.__private_key )[2:]
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = pow(self.generator , self.__private_key , self.prime )
return hex(a_ )[2:]
def A ( self : Tuple , a_ : int ):
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(a_ , (self.prime - 1) // 2 , self.prime ) == 1
)
def A ( self : Tuple , a_ : str ):
"""simple docstring"""
__snake_case = int(a_ , base=16 )
if not self.is_valid_public_key(a_ ):
raise ValueError("Invalid public key" )
__snake_case = pow(a_ , self.__private_key , self.prime )
return shaaaa(str(a_ ).encode() ).hexdigest()
@staticmethod
def A ( a_ : int , a_ : int ):
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(a_ , (prime - 1) // 2 , a_ ) == 1
)
@staticmethod
def A ( a_ : str , a_ : str , a_ : int = 14 ):
"""simple docstring"""
__snake_case = int(a_ , base=16 )
__snake_case = int(a_ , base=16 )
__snake_case = primes[group]["prime"]
if not DiffieHellman.is_valid_public_key_static(a_ , a_ ):
raise ValueError("Invalid public key" )
__snake_case = pow(a_ , a_ , a_ )
return shaaaa(str(a_ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int = 10**12 ) -> int:
__snake_case = 1
__snake_case = 0
__snake_case = 1
__snake_case = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
| 704 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if number > 0:
raise ValueError("input must be a negative integer" )
__snake_case = len(bin(_UpperCAmelCase )[3:] )
__snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
__snake_case = (
(
"1"
+ "0" * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> List[str]:
__snake_case = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
__snake_case = flatten_dict(_UpperCAmelCase )
return flax_params
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> List[Any]:
__snake_case = {}
__snake_case = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
__snake_case = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__snake_case = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__snake_case = new_key.replace(_UpperCAmelCase , _UpperCAmelCase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__snake_case = new_key.replace(_UpperCAmelCase , _UpperCAmelCase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__snake_case = re.sub(R"layers_(\d+)" , R"layer.\1" , _UpperCAmelCase )
__snake_case = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__snake_case = re.sub(R"layers_(\d+)" , R"layer.\1" , _UpperCAmelCase )
__snake_case = flax_dict[key]
__snake_case = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__snake_case = torch.from_numpy(converted_dict[key].T )
else:
__snake_case = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def __UpperCAmelCase ( _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Optional[Any]=False ) -> Dict:
__snake_case = get_flax_param(_UpperCAmelCase )
if not use_large:
__snake_case = PixaStructVisionConfig()
__snake_case = PixaStructTextConfig()
else:
__snake_case = PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__snake_case = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__snake_case = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_UpperCAmelCase )
__snake_case = PixaStructForConditionalGeneration(_UpperCAmelCase )
__snake_case = rename_and_convert_flax_params(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
__snake_case = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
__snake_case = PixaStructImageProcessor()
__snake_case = PixaStructProcessor(image_processor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
if use_large:
__snake_case = 40_96
__snake_case = True
# mkdir if needed
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
print("Model saved in {}".format(_UpperCAmelCase ) )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
a : List[str] = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 705 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
__snake_case = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 0 |
'''simple docstring'''
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ) -> Dict:
# load base model
__snake_case = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase , torch_dtype=torch.floataa )
# load LoRA weight from .safetensors
__snake_case = load_file(_UpperCAmelCase )
__snake_case = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
__snake_case = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
__snake_case = pipeline.text_encoder
else:
__snake_case = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
__snake_case = pipeline.unet
# find the target layer
__snake_case = layer_infos.pop(0 )
while len(_UpperCAmelCase ) > -1:
try:
__snake_case = curr_layer.__getattr__(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
__snake_case = layer_infos.pop(0 )
elif len(_UpperCAmelCase ) == 0:
break
except Exception:
if len(_UpperCAmelCase ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
__snake_case = layer_infos.pop(0 )
__snake_case = []
if "lora_down" in key:
pair_keys.append(key.replace("lora_down" , "lora_up" ) )
pair_keys.append(_UpperCAmelCase )
else:
pair_keys.append(_UpperCAmelCase )
pair_keys.append(key.replace("lora_up" , "lora_down" ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
__snake_case = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
__snake_case = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_UpperCAmelCase , _UpperCAmelCase ).unsqueeze(2 ).unsqueeze(3 )
else:
__snake_case = state_dict[pair_keys[0]].to(torch.floataa )
__snake_case = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(_UpperCAmelCase , _UpperCAmelCase )
# update visited list
for item in pair_keys:
visited.append(_UpperCAmelCase )
return pipeline
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
a : Any = parser.parse_args()
a : Optional[Any] = args.base_model_path
a : List[str] = args.checkpoint_path
a : Optional[Any] = args.dump_path
a : List[str] = args.lora_prefix_unet
a : int = args.lora_prefix_text_encoder
a : str = args.alpha
a : Optional[Any] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
a : int = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
a : Optional[Any] = pytest.mark.integration
a : List[str] = {'''comet'''}
a : Union[str, Any] = importlib.util.find_spec('''fairseq''') is not None
a : List[Any] = {'''code_eval'''}
a : str = os.name == '''nt'''
a : Union[str, Any] = {'''bertscore''', '''frugalscore''', '''perplexity'''}
a : Dict = importlib.util.find_spec('''transformers''') is not None
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> Dict:
@wraps(_UpperCAmelCase )
def wrapper(self : Optional[int] , _UpperCAmelCase : Optional[int] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , _UpperCAmelCase )
return wrapper
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> Dict:
@wraps(_UpperCAmelCase )
def wrapper(self : List[str] , _UpperCAmelCase : str ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , _UpperCAmelCase )
return wrapper
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> str:
@wraps(_UpperCAmelCase )
def wrapper(self : Any , _UpperCAmelCase : List[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , _UpperCAmelCase )
return wrapper
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
@local
class SCREAMING_SNAKE_CASE__ ( parameterized.TestCase ):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def A ( self : List[str] , a_ : str ):
"""simple docstring"""
__snake_case = "[...]"
__snake_case = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , a_ ) ).module_path )
__snake_case = datasets.load.import_main_class(metric_module.__name__ , dataset=a_ )
# check parameters
__snake_case = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(a_ , metric_module.__name__ ):
with self.use_local_metrics():
try:
__snake_case = doctest.testmod(a_ , verbose=a_ , raise_on_error=a_ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def A ( self : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = "[...]"
__snake_case = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , a_ ) ).module_path )
# run doctest
with self.use_local_metrics():
__snake_case = doctest.testmod(a_ , verbose=a_ , raise_on_error=a_ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def A ( self : Any , a_ : Optional[Any] , a_ : List[Any] ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](a_ ):
yield
else:
yield
@contextmanager
def A ( self : Union[str, Any] ):
"""simple docstring"""
def load_local_metric(a_ : Dict , *a_ : List[Any] , **a_ : List[Any] ):
return load_metric(os.path.join("metrics" , a_ ) , *a_ , **a_ )
with patch("datasets.load_metric" ) as mock_load_metric:
__snake_case = load_local_metric
yield
@classmethod
def A ( cls : List[str] , a_ : Optional[int] ):
"""simple docstring"""
def wrapper(a_ : Optional[Any] ):
__snake_case = contextmanager(a_ )
__snake_case = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] ) -> str:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Optional[Any] , a_ : Any ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
__snake_case = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> str:
import torch
def bert_cos_score_idf(_UpperCAmelCase : Any , _UpperCAmelCase : Any , *_UpperCAmelCase : str , **_UpperCAmelCase : Dict ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(_UpperCAmelCase ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
__snake_case = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Any:
def load_from_checkpoint(_UpperCAmelCase : Tuple ):
class SCREAMING_SNAKE_CASE__ :
def A ( self : Dict , a_ : Any , *a_ : Dict , **a_ : List[str] ):
"""simple docstring"""
assert len(a_ ) == 2
__snake_case = [0.19, 0.92]
return scores, sum(a_ ) / len(a_ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
__snake_case = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
__snake_case = load_from_checkpoint
yield
def __UpperCAmelCase ( ) -> Any:
__snake_case = load_metric(os.path.join("metrics" , "seqeval" ) )
__snake_case = "ERROR"
__snake_case = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(_UpperCAmelCase , match=re.escape(_UpperCAmelCase ) ):
metric.compute(predictions=[] , references=[] , scheme=_UpperCAmelCase )
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 680 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
__snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
a : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
a : Tuple = ['''names''', '''prefix''']
a : Union[str, Any] = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
a : Dict = ['''encoding_errors''', '''on_bad_lines''']
a : List[str] = ['''date_format''']
@dataclass
class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ):
__SCREAMING_SNAKE_CASE = ""","""
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """infer"""
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """."""
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """\""""
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 10000
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = """strict"""
__SCREAMING_SNAKE_CASE = """error"""
__SCREAMING_SNAKE_CASE = None
def A ( self : List[Any] ):
"""simple docstring"""
if self.delimiter is not None:
__snake_case = self.delimiter
if self.column_names is not None:
__snake_case = self.column_names
@property
def A ( self : int ):
"""simple docstring"""
__snake_case = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , a_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ):
__SCREAMING_SNAKE_CASE = CsvConfig
def A ( self : Dict ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def A ( self : Optional[Any] , a_ : Dict ):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__snake_case = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a_ , (str, list, tuple) ):
__snake_case = data_files
if isinstance(a_ , a_ ):
__snake_case = [files]
__snake_case = [dl_manager.iter_files(a_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
__snake_case = []
for split_name, files in data_files.items():
if isinstance(a_ , a_ ):
__snake_case = [files]
__snake_case = [dl_manager.iter_files(a_ ) for file in files]
splits.append(datasets.SplitGenerator(name=a_ , gen_kwargs={"files": files} ) )
return splits
def A ( self : Any , a_ : pa.Table ):
"""simple docstring"""
if self.config.features is not None:
__snake_case = self.config.features.arrow_schema
if all(not require_storage_cast(a_ ) for feature in self.config.features.values() ):
# cheaper cast
__snake_case = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=a_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__snake_case = table_cast(a_ , a_ )
return pa_table
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
__snake_case = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__snake_case = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(a_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(a_ ) ):
__snake_case = pd.read_csv(a_ , iterator=a_ , dtype=a_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(a_ ):
__snake_case = pa.Table.from_pandas(a_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(a_ )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(a_ )}: {e}''' )
raise
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
# to overwrite at feature extractactor specific tests
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
@property
def A ( self : List[str] ):
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(a_ , "feature_size" ) )
self.assertTrue(hasattr(a_ , "sampling_rate" ) )
self.assertTrue(hasattr(a_ , "padding_value" ) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
__snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ )
__snake_case = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
__snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ )
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
__snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def A ( self : int ):
"""simple docstring"""
__snake_case = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ )
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
__snake_case = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__snake_case = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def A ( self : Optional[int] , a_ : Union[str, Any]=False ):
"""simple docstring"""
def _inputs_have_equal_length(a_ : List[Any] ):
__snake_case = len(input[0] )
for input_slice in input[1:]:
if len(a_ ) != length:
return False
return True
def _inputs_are_equal(a_ : Optional[int] , a_ : str ):
if len(a_ ) != len(a_ ):
return False
for input_slice_a, input_slice_a in zip(a_ , a_ ):
if not np.allclose(np.asarray(a_ ) , np.asarray(a_ ) , atol=1e-3 ):
return False
return True
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ )
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} )
__snake_case = self.feat_extract_tester.seq_length_diff
__snake_case = self.feat_extract_tester.max_seq_length + pad_diff
__snake_case = self.feat_extract_tester.min_seq_length
__snake_case = self.feat_extract_tester.batch_size
__snake_case = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__snake_case = feat_extract.pad(a_ , padding=a_ )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(a_ , padding="longest" )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(a_ , padding="max_length" , max_length=len(speech_inputs[-1] ) )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(a_ , padding="longest" , return_tensors="np" )
__snake_case = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding="max_length" )[input_name]
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=a_ , return_tensors="np" )
__snake_case = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_are_equal(a_ , a_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__snake_case = feat_extract.pad(a_ , pad_to_multiple_of=10 )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(a_ , padding="longest" , pad_to_multiple_of=10 )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(
a_ , padding="max_length" , pad_to_multiple_of=10 , max_length=a_ )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(
a_ , padding="max_length" , pad_to_multiple_of=10 , max_length=a_ , return_tensors="np" , )
__snake_case = input_a[input_name]
self.assertTrue(all(len(a_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(a_ , a_ ) )
__snake_case = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(a_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__snake_case = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def A ( self : List[str] , a_ : str=False ):
"""simple docstring"""
def _inputs_have_equal_length(a_ : Union[str, Any] ):
__snake_case = len(input[0] )
for input_slice in input[1:]:
if len(a_ ) != length:
return False
return True
def _inputs_are_equal(a_ : str , a_ : Tuple ):
if len(a_ ) != len(a_ ):
return False
for input_slice_a, input_slice_a in zip(a_ , a_ ):
if not np.allclose(np.asarray(a_ ) , np.asarray(a_ ) , atol=1e-3 ):
return False
return True
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ )
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=a_ )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(a_ , padding="max_length" , max_length=len(speech_inputs[0] ) )
__snake_case = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertFalse(_inputs_have_equal_length(a_ ) )
# truncate to smallest with np
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=a_ , )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
__snake_case = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(a_ ) )
# truncate to middle
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=a_ , return_tensors="np" , )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=a_ )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
__snake_case = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_are_equal(a_ , a_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(a_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , truncation=a_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding="longest" , truncation=a_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding="longest" , truncation=a_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding="max_length" , truncation=a_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__snake_case = 12
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=a_ , truncation=a_ , )
__snake_case = input_a[input_name]
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=a_ , )
__snake_case = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__snake_case = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__snake_case = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertFalse(_inputs_have_equal_length(a_ ) )
def A ( self : List[str] ):
"""simple docstring"""
self._check_padding(numpify=a_ )
def A ( self : str ):
"""simple docstring"""
self._check_padding(numpify=a_ )
def A ( self : Any ):
"""simple docstring"""
self._check_truncation(numpify=a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
self._check_truncation(numpify=a_ )
@require_torch
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} )
__snake_case = feat_extract.pad(a_ , padding="longest" , return_tensors="np" )[input_name]
__snake_case = feat_extract.pad(a_ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} )
__snake_case = feat_extract.pad(a_ , padding="longest" , return_tensors="np" )[input_name]
__snake_case = feat_extract.pad(a_ , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.feat_extract_dict
__snake_case = True
__snake_case = self.feature_extraction_class(**a_ )
__snake_case = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case = [len(a_ ) for x in speech_inputs]
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} )
__snake_case = feat_extract.pad(a_ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.feat_extract_dict
__snake_case = True
__snake_case = self.feature_extraction_class(**a_ )
__snake_case = self.feat_extract_tester.prepare_inputs_for_common()
__snake_case = [len(a_ ) for x in speech_inputs]
__snake_case = feat_extract.model_input_names[0]
__snake_case = BatchFeature({input_name: speech_inputs} )
__snake_case = min(a_ )
__snake_case = feat_extract.pad(
a_ , padding="max_length" , max_length=a_ , truncation=a_ , return_tensors="np" )
self.assertIn("attention_mask" , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {
'''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''],
'''tokenization_electra''': ['''ElectraTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ['''ElectraTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = [
'''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ElectraForCausalLM''',
'''ElectraForMaskedLM''',
'''ElectraForMultipleChoice''',
'''ElectraForPreTraining''',
'''ElectraForQuestionAnswering''',
'''ElectraForSequenceClassification''',
'''ElectraForTokenClassification''',
'''ElectraModel''',
'''ElectraPreTrainedModel''',
'''load_tf_weights_in_electra''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
'''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFElectraForMaskedLM''',
'''TFElectraForMultipleChoice''',
'''TFElectraForPreTraining''',
'''TFElectraForQuestionAnswering''',
'''TFElectraForSequenceClassification''',
'''TFElectraForTokenClassification''',
'''TFElectraModel''',
'''TFElectraPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
'''FlaxElectraForCausalLM''',
'''FlaxElectraForMaskedLM''',
'''FlaxElectraForMultipleChoice''',
'''FlaxElectraForPreTraining''',
'''FlaxElectraForQuestionAnswering''',
'''FlaxElectraForSequenceClassification''',
'''FlaxElectraForTokenClassification''',
'''FlaxElectraModel''',
'''FlaxElectraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a : List[Any] = logging.get_logger(__name__)
a : Optional[int] = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """layoutlmv3"""
def __init__( self : List[Any] , a_ : Optional[int]=50_265 , a_ : Union[str, Any]=768 , a_ : List[str]=12 , a_ : Optional[int]=12 , a_ : Any=3_072 , a_ : Tuple="gelu" , a_ : Tuple=0.1 , a_ : str=0.1 , a_ : Union[str, Any]=512 , a_ : Optional[int]=2 , a_ : str=0.02 , a_ : List[Any]=1e-5 , a_ : str=1 , a_ : str=0 , a_ : Tuple=2 , a_ : Dict=1_024 , a_ : Any=128 , a_ : Optional[Any]=128 , a_ : Optional[int]=True , a_ : List[Any]=32 , a_ : List[Any]=128 , a_ : Dict=64 , a_ : Tuple=256 , a_ : List[str]=True , a_ : List[str]=True , a_ : Optional[Any]=True , a_ : Dict=224 , a_ : Dict=3 , a_ : List[Any]=16 , a_ : Any=None , **a_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
vocab_size=a_ , hidden_size=a_ , num_hidden_layers=a_ , num_attention_heads=a_ , intermediate_size=a_ , hidden_act=a_ , hidden_dropout_prob=a_ , attention_probs_dropout_prob=a_ , max_position_embeddings=a_ , type_vocab_size=a_ , initializer_range=a_ , layer_norm_eps=a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ , )
__snake_case = max_ad_position_embeddings
__snake_case = coordinate_size
__snake_case = shape_size
__snake_case = has_relative_attention_bias
__snake_case = rel_pos_bins
__snake_case = max_rel_pos
__snake_case = has_spatial_attention_bias
__snake_case = rel_ad_pos_bins
__snake_case = max_rel_ad_pos
__snake_case = text_embed
__snake_case = visual_embed
__snake_case = input_size
__snake_case = num_channels
__snake_case = patch_size
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.12""" )
@property
def A ( self : List[Any] ):
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-5
@property
def A ( self : Tuple ):
"""simple docstring"""
return 12
def A ( self : List[str] , a_ : "ProcessorMixin" , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional["TensorType"] = None , a_ : int = 3 , a_ : int = 40 , a_ : int = 40 , ):
"""simple docstring"""
setattr(processor.image_processor , "apply_ocr" , a_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__snake_case = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case = processor.tokenizer.num_special_tokens_to_add(a_ )
__snake_case = compute_effective_axis_dimension(
a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ )
# Generate dummy inputs according to compute batch and sequence
__snake_case = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__snake_case = [[[48, 84, 73, 128]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__snake_case = self._generate_dummy_images(a_ , a_ , a_ , a_ )
__snake_case = dict(
processor(
a_ , text=a_ , boxes=a_ , return_tensors=a_ , ) )
return inputs
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Dict , a_ : int , a_ : int=None , a_ : Optional[Any]=None ):
"""simple docstring"""
__snake_case = data
__snake_case = previous
__snake_case = next_node
def __str__( self : int ):
"""simple docstring"""
return f'''{self.data}'''
def A ( self : Optional[Any] ):
"""simple docstring"""
return self.data
def A ( self : List[Any] ):
"""simple docstring"""
return self.next
def A ( self : int ):
"""simple docstring"""
return self.previous
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : int ):
"""simple docstring"""
__snake_case = head
def __iter__( self : int ):
"""simple docstring"""
return self
def A ( self : Tuple ):
"""simple docstring"""
if not self.current:
raise StopIteration
else:
__snake_case = self.current.get_data()
__snake_case = self.current.get_next()
return value
class SCREAMING_SNAKE_CASE__ :
def __init__( self : List[Any] ):
"""simple docstring"""
__snake_case = None # First node in list
__snake_case = None # Last node in list
def __str__( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.head
__snake_case = []
while current is not None:
nodes.append(current.get_data() )
__snake_case = current.get_next()
return " ".join(str(a_ ) for node in nodes )
def __contains__( self : int , a_ : int ):
"""simple docstring"""
__snake_case = self.head
while current:
if current.get_data() == value:
return True
__snake_case = current.get_next()
return False
def __iter__( self : int ):
"""simple docstring"""
return LinkedListIterator(self.head )
def A ( self : List[Any] ):
"""simple docstring"""
if self.head:
return self.head.get_data()
return None
def A ( self : Tuple ):
"""simple docstring"""
if self.tail:
return self.tail.get_data()
return None
def A ( self : int , a_ : Node ):
"""simple docstring"""
if self.head is None:
__snake_case = node
__snake_case = node
else:
self.insert_before_node(self.head , a_ )
def A ( self : List[Any] , a_ : Node ):
"""simple docstring"""
if self.head is None:
self.set_head(a_ )
else:
self.insert_after_node(self.tail , a_ )
def A ( self : int , a_ : int ):
"""simple docstring"""
__snake_case = Node(a_ )
if self.head is None:
self.set_head(a_ )
else:
self.set_tail(a_ )
def A ( self : Dict , a_ : Node , a_ : Node ):
"""simple docstring"""
__snake_case = node
__snake_case = node.previous
if node.get_previous() is None:
__snake_case = node_to_insert
else:
__snake_case = node_to_insert
__snake_case = node_to_insert
def A ( self : int , a_ : Node , a_ : Node ):
"""simple docstring"""
__snake_case = node
__snake_case = node.next
if node.get_next() is None:
__snake_case = node_to_insert
else:
__snake_case = node_to_insert
__snake_case = node_to_insert
def A ( self : List[Any] , a_ : int , a_ : int ):
"""simple docstring"""
__snake_case = 1
__snake_case = Node(a_ )
__snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(a_ , a_ )
return
current_position += 1
__snake_case = node.next
self.insert_after_node(self.tail , a_ )
def A ( self : Any , a_ : int ):
"""simple docstring"""
__snake_case = self.head
while node:
if node.get_data() == item:
return node
__snake_case = node.get_next()
raise Exception("Node not found" )
def A ( self : List[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
if (node := self.get_node(a_ )) is not None:
if node == self.head:
__snake_case = self.head.get_next()
if node == self.tail:
__snake_case = self.tail.get_previous()
self.remove_node_pointers(a_ )
@staticmethod
def A ( a_ : Node ):
"""simple docstring"""
if node.get_next():
__snake_case = node.previous
if node.get_previous():
__snake_case = node.next
__snake_case = None
__snake_case = None
def A ( self : List[Any] ):
"""simple docstring"""
return self.head is None
def __UpperCAmelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import re
def A__ ( _UpperCAmelCase : str ) -> str:
if len(re.findall("[ATCG]" , _UpperCAmelCase ) ) != len(_UpperCAmelCase ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" )
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
import pprint
import requests
a : int = '''https://zenquotes.io/api'''
def __UpperCAmelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + "/today" ).json()
def __UpperCAmelCase ( ) -> list:
return requests.get(API_ENDPOINT_URL + "/random" ).json()
if __name__ == "__main__":
a : str = random_quotes()
pprint.pprint(response)
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
| 680 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a : Optional[int] = logging.get_logger(__name__)
a : Any = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """resnet"""
__SCREAMING_SNAKE_CASE = ["""basic""", """bottleneck"""]
def __init__( self : Optional[int] , a_ : int=3 , a_ : List[str]=64 , a_ : Any=[256, 512, 1_024, 2_048] , a_ : Any=[3, 4, 6, 3] , a_ : str="bottleneck" , a_ : List[Any]="relu" , a_ : Dict=False , a_ : List[Any]=None , a_ : Tuple=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
__snake_case = num_channels
__snake_case = embedding_size
__snake_case = hidden_sizes
__snake_case = depths
__snake_case = layer_type
__snake_case = hidden_act
__snake_case = downsample_in_first_stage
__snake_case = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(a_ ) + 1 )]
__snake_case , __snake_case = get_aligned_output_features_output_indices(
out_features=a_ , out_indices=a_ , stage_names=self.stage_names )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def A ( self : str ):
"""simple docstring"""
return 1e-3
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
| 680 | 0 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> List[Any]:
__snake_case = k_size // 2
__snake_case , __snake_case = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
__snake_case = 1 / (2 * pi * sigma) * exp(-(square(_UpperCAmelCase ) + square(_UpperCAmelCase )) / (2 * square(_UpperCAmelCase )) )
return g
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] ) -> Optional[int]:
__snake_case , __snake_case = image.shape[0], image.shape[1]
# dst image height and width
__snake_case = height - k_size + 1
__snake_case = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
__snake_case = zeros((dst_height * dst_width, k_size * k_size) )
__snake_case = 0
for i, j in product(range(_UpperCAmelCase ) , range(_UpperCAmelCase ) ):
__snake_case = ravel(image[i : i + k_size, j : j + k_size] )
__snake_case = window
row += 1
# turn the kernel into shape(k*k, 1)
__snake_case = gen_gaussian_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = ravel(_UpperCAmelCase )
# reshape and get the dst image
__snake_case = dot(_UpperCAmelCase , _UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase )
return dst
if __name__ == "__main__":
# read original image
a : List[Any] = imread(r'''../image_data/lena.jpg''')
# turn image in gray scale value
a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
a : Optional[int] = gaussian_filter(gray, 3, sigma=1)
a : str = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('''gaussian filter with 3x3 mask''', gaussianaxa)
imshow('''gaussian filter with 5x5 mask''', gaussianaxa)
waitKey()
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 680 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> List[Any]:
return EnvironmentCommand()
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@staticmethod
def A ( a_ : ArgumentParser ):
"""simple docstring"""
__snake_case = parser.add_parser("env" )
download_parser.set_defaults(func=a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = huggingface_hub.__version__
__snake_case = "not installed"
__snake_case = "NA"
if is_torch_available():
import torch
__snake_case = torch.__version__
__snake_case = torch.cuda.is_available()
__snake_case = "not installed"
if is_transformers_available():
import transformers
__snake_case = transformers.__version__
__snake_case = "not installed"
if is_accelerate_available():
import accelerate
__snake_case = accelerate.__version__
__snake_case = "not installed"
if is_xformers_available():
import xformers
__snake_case = xformers.__version__
__snake_case = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a_ ) )
return info
@staticmethod
def A ( a_ : List[str] ):
"""simple docstring"""
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
| 680 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> List[str]:
# Initialise PyTorch model
__snake_case = BertConfig.from_json_file(_UpperCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
__snake_case = BertForPreTraining(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _UpperCAmelCase )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int ) -> list:
__snake_case = 0
# Number of processes finished
__snake_case = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
__snake_case = [0] * no_of_process
# List to include calculation results
__snake_case = [0] * no_of_process
# Sort by arrival time.
__snake_case = [burst_time[i] for i in np.argsort(_UpperCAmelCase )]
__snake_case = [process_name[i] for i in np.argsort(_UpperCAmelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
__snake_case = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
__snake_case = arrival_time[i]
__snake_case = 0
# Index showing the location of the process being performed
__snake_case = 0
# Saves the current response ratio.
__snake_case = 0
for i in range(0 , _UpperCAmelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
__snake_case = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
__snake_case = temp
__snake_case = i
# Calculate the turn around time
__snake_case = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
__snake_case = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : list , _UpperCAmelCase : int ) -> list:
__snake_case = [0] * no_of_process
for i in range(0 , _UpperCAmelCase ):
__snake_case = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
a : Dict = 5
a : List[Any] = ['''A''', '''B''', '''C''', '''D''', '''E''']
a : List[str] = [1, 2, 3, 4, 5]
a : str = [1, 2, 3, 4, 5]
a : List[Any] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
a : Optional[Any] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Dict , a_ : str , a_ : Tuple ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=a_ , scheduler=a_ )
@torch.no_grad()
def __call__( self : Tuple , a_ : int = 1 , a_ : Optional[torch.Generator] = None , a_ : int = 50 , a_ : Optional[str] = "pil" , a_ : bool = True , **a_ : Dict , ):
"""simple docstring"""
__snake_case = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a_ , )
__snake_case = image.to(self.device )
# set step values
self.scheduler.set_timesteps(a_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__snake_case = self.unet(a_ , a_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__snake_case = self.scheduler.step(a_ , a_ , a_ ).prev_sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1 )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(a_ )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=a_ ), "This is a local test"
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Optional[Any] , a_ : Optional[int]=13 , a_ : Optional[Any]=7 , a_ : Any=True , a_ : str=True , a_ : int=True , a_ : List[str]=True , a_ : Dict=True , a_ : Any=False , a_ : Any=False , a_ : Optional[Any]=False , a_ : Optional[int]=2 , a_ : List[str]=99 , a_ : List[Any]=0 , a_ : Tuple=32 , a_ : str=5 , a_ : Dict=4 , a_ : Tuple=0.1 , a_ : Any=0.1 , a_ : Dict=512 , a_ : Optional[Any]=12 , a_ : Dict=2 , a_ : Tuple=0.02 , a_ : Tuple=3 , a_ : List[str]=4 , a_ : Tuple="last" , a_ : Optional[int]=None , a_ : Optional[Any]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_input_lengths
__snake_case = use_token_type_ids
__snake_case = use_labels
__snake_case = gelu_activation
__snake_case = sinusoidal_embeddings
__snake_case = causal
__snake_case = asm
__snake_case = n_langs
__snake_case = vocab_size
__snake_case = n_special
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = num_choices
__snake_case = summary_type
__snake_case = use_proj
__snake_case = scope
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case = None
if self.use_input_lengths:
__snake_case = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__snake_case = None
if self.use_token_type_ids:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__snake_case = None
__snake_case = None
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case = ids_tensor([self.batch_size] , 2 ).float()
__snake_case = ids_tensor([self.batch_size] , self.num_choices )
__snake_case = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def A ( self : Optional[int] ):
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def A ( self : Union[str, Any] , a_ : Union[str, Any] , a_ : Dict , a_ : str , a_ : str , a_ : Optional[int] , a_ : Dict , a_ : str , a_ : Optional[int] , a_ : Dict , ):
"""simple docstring"""
__snake_case = FlaubertModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , lengths=a_ , langs=a_ )
__snake_case = model(a_ , langs=a_ )
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , a_ : int , a_ : str , a_ : str , a_ : Tuple , a_ : str , a_ : Optional[Any] , a_ : str , a_ : Dict , a_ : Optional[int] , ):
"""simple docstring"""
__snake_case = FlaubertWithLMHeadModel(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Union[str, Any] , a_ : int , a_ : int , a_ : str , a_ : Optional[Any] , a_ : Union[str, Any] , a_ : Any , a_ : Tuple , a_ : Any , a_ : Tuple , ):
"""simple docstring"""
__snake_case = FlaubertForQuestionAnsweringSimple(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
__snake_case = model(a_ , start_positions=a_ , end_positions=a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Tuple , a_ : Any , a_ : Union[str, Any] , a_ : int , a_ : Any , a_ : Union[str, Any] , a_ : str , a_ : List[Any] , a_ : Optional[int] , a_ : Union[str, Any] , ):
"""simple docstring"""
__snake_case = FlaubertForQuestionAnswering(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
__snake_case = model(
a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , )
__snake_case = model(
a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , )
((__snake_case ) , ) = result_with_labels.to_tuple()
__snake_case = model(a_ , start_positions=a_ , end_positions=a_ )
((__snake_case ) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def A ( self : int , a_ : Any , a_ : Union[str, Any] , a_ : Any , a_ : int , a_ : List[str] , a_ : Optional[Any] , a_ : Dict , a_ : Any , a_ : List[str] , ):
"""simple docstring"""
__snake_case = FlaubertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : List[Any] , a_ : Optional[int] , a_ : int , a_ : int , a_ : List[str] , a_ : Dict , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : Optional[Any] , ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = FlaubertForTokenClassification(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , attention_mask=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Optional[Any] , a_ : List[Any] , a_ : Dict , a_ : int , a_ : Dict , a_ : List[Any] , a_ : Optional[int] , a_ : Optional[int] , a_ : Dict , a_ : List[Any] , ):
"""simple docstring"""
__snake_case = self.num_choices
__snake_case = FlaubertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
__snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) = config_and_inputs
__snake_case = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def A ( self : List[Any] , a_ : Tuple , a_ : Optional[int] , a_ : str , a_ : int , a_ : Any ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def A ( self : List[str] , a_ : List[str] , a_ : List[Any] , a_ : Any=False ):
"""simple docstring"""
__snake_case = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
__snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = FlaubertModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , emb_dim=37 )
def A ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*a_ )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*a_ )
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = FlaubertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def A ( self : Tuple ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__snake_case = True
__snake_case = model_class(config=a_ )
__snake_case = self._prepare_for_class(a_ , a_ )
__snake_case = torch.jit.trace(
a_ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , "traced_model.pt" ) )
__snake_case = torch.jit.load(os.path.join(a_ , "traced_model.pt" ) , map_location=a_ )
loaded(inputs_dict["input_ids"].to(a_ ) , inputs_dict["attention_mask"].to(a_ ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def A ( self : Any ):
"""simple docstring"""
__snake_case = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
__snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
__snake_case = model(a_ )[0]
__snake_case = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a_ )
__snake_case = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
| 680 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline
__SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE = frozenset([] )
def A ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
__snake_case = PNDMScheduler(skip_prk_steps=a_ )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
__snake_case = CLIPTextModel(a_ )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__snake_case = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A ( self : Tuple , a_ : int , a_ : Optional[int]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ).resize((64, 64) )
__snake_case = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a_ ).startswith("mps" ):
__snake_case = torch.manual_seed(a_ )
else:
__snake_case = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = StableDiffusionInpaintPipeline(**a_ )
__snake_case = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_dummy_inputs(a_ )
__snake_case = sd_pipe(**a_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : List[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Dict ):
"""simple docstring"""
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
__snake_case = "stabilityai/stable-diffusion-2-inpainting"
__snake_case = StableDiffusionInpaintPipeline.from_pretrained(a_ , safety_checker=a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__snake_case = "Face of a yellow cat, high resolution, sitting on a park bench"
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , image=a_ , mask_image=a_ , generator=a_ , output_type="np" , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def A ( self : Any ):
"""simple docstring"""
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
__snake_case = "stabilityai/stable-diffusion-2-inpainting"
__snake_case = StableDiffusionInpaintPipeline.from_pretrained(
a_ , torch_dtype=torch.floataa , safety_checker=a_ , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__snake_case = "Face of a yellow cat, high resolution, sitting on a park bench"
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , image=a_ , mask_image=a_ , generator=a_ , output_type="np" , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def A ( self : List[Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__snake_case = "stabilityai/stable-diffusion-2-inpainting"
__snake_case = PNDMScheduler.from_pretrained(a_ , subfolder="scheduler" )
__snake_case = StableDiffusionInpaintPipeline.from_pretrained(
a_ , safety_checker=a_ , scheduler=a_ , torch_dtype=torch.floataa , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__snake_case = "Face of a yellow cat, high resolution, sitting on a park bench"
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , image=a_ , mask_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 703 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> int:
if not grid or not grid[0]:
raise TypeError("The grid does not contain the appropriate information" )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
__snake_case = grid[0]
for row_n in range(1 , len(_UpperCAmelCase ) ):
__snake_case = grid[row_n]
__snake_case = fill_row(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = grid[row_n]
return grid[-1][-1]
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : list ) -> list:
current_row[0] += row_above[0]
for cell_n in range(1 , len(_UpperCAmelCase ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if number > 0:
raise ValueError("input must be a negative integer" )
__snake_case = len(bin(_UpperCAmelCase )[3:] )
__snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
__snake_case = (
(
"1"
+ "0" * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __UpperCAmelCase ( _UpperCAmelCase : dict ) -> tuple:
return (data["data"], data["target"])
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : np.ndarray ) -> XGBClassifier:
__snake_case = XGBClassifier()
classifier.fit(_UpperCAmelCase , _UpperCAmelCase )
return classifier
def __UpperCAmelCase ( ) -> None:
__snake_case = load_iris()
__snake_case , __snake_case = data_handling(_UpperCAmelCase )
__snake_case , __snake_case , __snake_case , __snake_case = train_test_split(
_UpperCAmelCase , _UpperCAmelCase , test_size=0.25 )
__snake_case = iris["target_names"]
# Create an XGBoost Classifier from the training data
__snake_case = xgboost(_UpperCAmelCase , _UpperCAmelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , display_labels=_UpperCAmelCase , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 705 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
__snake_case = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a : Optional[int] = get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( enum.Enum ):
__SCREAMING_SNAKE_CASE = """all_checks"""
__SCREAMING_SNAKE_CASE = """basic_checks"""
__SCREAMING_SNAKE_CASE = """no_checks"""
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
def __UpperCAmelCase ( _UpperCAmelCase : Optional[dict] , _UpperCAmelCase : dict , _UpperCAmelCase : int=None ) -> Union[str, Any]:
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
__snake_case = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__snake_case = " for " + verification_name if verification_name is not None else ""
if len(_UpperCAmelCase ) > 0:
raise NonMatchingChecksumError(
F'''Checksums didn\'t match{for_verification_name}:\n'''
F'''{bad_urls}\n'''
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
pass
def __UpperCAmelCase ( _UpperCAmelCase : Optional[dict] , _UpperCAmelCase : dict ) -> List[str]:
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise ExpectedMoreSplits(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
if len(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) > 0:
raise UnexpectedSplits(str(set(_UpperCAmelCase ) - set(_UpperCAmelCase ) ) )
__snake_case = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(_UpperCAmelCase ) > 0:
raise NonMatchingSplitsSizesError(str(_UpperCAmelCase ) )
logger.info("All the splits matched successfully." )
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : bool = True ) -> dict:
if record_checksum:
__snake_case = shaaaa()
with open(_UpperCAmelCase , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(_UpperCAmelCase )
__snake_case = m.hexdigest()
else:
__snake_case = None
return {"num_bytes": os.path.getsize(_UpperCAmelCase ), "checksum": checksum}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Dict:
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__SCREAMING_SNAKE_CASE = False
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = 32
__snake_case = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a_ , projection_dim=a_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__snake_case = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a_ , num_layers=1 , )
torch.manual_seed(0 )
__snake_case = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=a_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
__snake_case = StableUnCLIPImageNormalizer(embedding_dim=a_ )
__snake_case = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
__snake_case = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) )
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a_ , layers_per_block=1 , upcast_attention=a_ , use_linear_projection=a_ , )
torch.manual_seed(0 )
__snake_case = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=a_ , steps_offset=1 , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL()
__snake_case = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def A ( self : Tuple , a_ : Dict , a_ : str=0 ):
"""simple docstring"""
if str(a_ ).startswith("mps" ):
__snake_case = torch.manual_seed(a_ )
else:
__snake_case = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=a_ )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=a_ )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
__snake_case = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = torch.Generator(device="cpu" ).manual_seed(0 )
__snake_case = pipe("anime turle" , generator=a_ , output_type="np" )
__snake_case = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a_ , a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
__snake_case = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__snake_case = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
a : str = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Tuple , *a_ : List[str] , **a_ : Any ):
"""simple docstring"""
super().__init__(*a_ , **a_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A ( self : int , a_ : Tuple=None , a_ : List[str]=None , a_ : Dict=None ):
"""simple docstring"""
__snake_case = {}
__snake_case = {}
if prompt is not None:
__snake_case = prompt
if generate_kwargs is not None:
__snake_case = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__snake_case = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
__snake_case = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , a_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a_ : Optional[int] ):
"""simple docstring"""
return super().__call__(a_ , **a_ )
def A ( self : Tuple , a_ : Optional[int] , a_ : Tuple=None ):
"""simple docstring"""
__snake_case = load_image(a_ )
if prompt is not None:
if not isinstance(a_ , a_ ):
raise ValueError(
f'''Received an invalid text input, got - {type(a_ )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
__snake_case = self.model.config.model_type
if model_type == "git":
__snake_case = self.image_processor(images=a_ , return_tensors=self.framework )
__snake_case = self.tokenizer(text=a_ , add_special_tokens=a_ ).input_ids
__snake_case = [self.tokenizer.cls_token_id] + input_ids
__snake_case = torch.tensor(a_ ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
__snake_case = self.image_processor(images=a_ , header_text=a_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__snake_case = self.image_processor(images=a_ , return_tensors=self.framework )
__snake_case = self.tokenizer(a_ , return_tensors=self.framework )
model_inputs.update(a_ )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
__snake_case = self.image_processor(images=a_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__snake_case = None
return model_inputs
def A ( self : List[str] , a_ : List[str] , a_ : Optional[int]=None ):
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , a_ )
and all(x is None for x in model_inputs["input_ids"] )
):
__snake_case = None
if generate_kwargs is None:
__snake_case = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__snake_case = model_inputs.pop(self.model.main_input_name )
__snake_case = self.model.generate(a_ , **a_ , **a_ )
return model_outputs
def A ( self : List[str] , a_ : Any ):
"""simple docstring"""
__snake_case = []
for output_ids in model_outputs:
__snake_case = {
"generated_text": self.tokenizer.decode(
a_ , skip_special_tokens=a_ , )
}
records.append(a_ )
return records
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : Optional[int] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """deta"""
__SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : str , a_ : str=None , a_ : List[Any]=900 , a_ : Optional[Any]=2_048 , a_ : Optional[int]=6 , a_ : Dict=2_048 , a_ : Optional[Any]=8 , a_ : List[str]=6 , a_ : Optional[Any]=1_024 , a_ : List[Any]=8 , a_ : List[Any]=0.0 , a_ : int=True , a_ : Union[str, Any]="relu" , a_ : Optional[int]=256 , a_ : Union[str, Any]=0.1 , a_ : Tuple=0.0 , a_ : Optional[Any]=0.0 , a_ : Optional[int]=0.02 , a_ : str=1.0 , a_ : Dict=True , a_ : Dict=False , a_ : int="sine" , a_ : str=5 , a_ : Any=4 , a_ : Union[str, Any]=4 , a_ : Tuple=True , a_ : str=300 , a_ : List[Any]=True , a_ : Dict=True , a_ : Dict=1 , a_ : Optional[Any]=5 , a_ : Union[str, Any]=2 , a_ : Union[str, Any]=1 , a_ : Dict=1 , a_ : str=5 , a_ : List[Any]=2 , a_ : Dict=0.1 , a_ : Any=0.25 , **a_ : int , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
__snake_case = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(a_ , a_ ):
__snake_case = backbone_config.pop("model_type" )
__snake_case = CONFIG_MAPPING[backbone_model_type]
__snake_case = config_class.from_dict(a_ )
__snake_case = backbone_config
__snake_case = num_queries
__snake_case = max_position_embeddings
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = init_xavier_std
__snake_case = encoder_layerdrop
__snake_case = auxiliary_loss
__snake_case = position_embedding_type
# deformable attributes
__snake_case = num_feature_levels
__snake_case = encoder_n_points
__snake_case = decoder_n_points
__snake_case = two_stage
__snake_case = two_stage_num_proposals
__snake_case = with_box_refine
__snake_case = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
__snake_case = class_cost
__snake_case = bbox_cost
__snake_case = giou_cost
# Loss coefficients
__snake_case = mask_loss_coefficient
__snake_case = dice_loss_coefficient
__snake_case = bbox_loss_coefficient
__snake_case = giou_loss_coefficient
__snake_case = eos_coefficient
__snake_case = focal_alpha
super().__init__(is_encoder_decoder=a_ , **a_ )
@property
def A ( self : Tuple ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A ( self : str ):
"""simple docstring"""
return self.d_model
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = copy.deepcopy(self.__dict__ )
__snake_case = self.backbone_config.to_dict()
__snake_case = self.__class__.model_type
return output
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
__snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
a : Tuple = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : Dict , *a_ : Dict , **a_ : Any ):
"""simple docstring"""
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , a_ , )
super().__init__(*a_ , **a_ )
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
a : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , *a_ : str , **a_ : Optional[int] ):
"""simple docstring"""
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , a_ , )
super().__init__(*a_ , **a_ )
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
a : List[str] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : str = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """donut-swin"""
__SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Optional[Any] , a_ : List[Any]=224 , a_ : List[Any]=4 , a_ : int=3 , a_ : Dict=96 , a_ : Union[str, Any]=[2, 2, 6, 2] , a_ : List[str]=[3, 6, 12, 24] , a_ : Optional[Any]=7 , a_ : List[Any]=4.0 , a_ : Optional[Any]=True , a_ : Any=0.0 , a_ : Union[str, Any]=0.0 , a_ : Dict=0.1 , a_ : int="gelu" , a_ : Dict=False , a_ : int=0.02 , a_ : str=1e-5 , **a_ : str , ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = embed_dim
__snake_case = depths
__snake_case = len(a_ )
__snake_case = num_heads
__snake_case = window_size
__snake_case = mlp_ratio
__snake_case = qkv_bias
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = drop_path_rate
__snake_case = hidden_act
__snake_case = use_absolute_embeddings
__snake_case = layer_norm_eps
__snake_case = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__snake_case = int(embed_dim * 2 ** (len(a_ ) - 1) )
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
import math
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 ) -> list:
__snake_case = end or len(_UpperCAmelCase )
for i in range(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = i
__snake_case = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
__snake_case = array[temp_index - 1]
temp_index -= 1
__snake_case = temp_index_value
return array
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> None: # Max Heap
__snake_case = index
__snake_case = 2 * index + 1 # Left Node
__snake_case = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
__snake_case = left_index
if right_index < heap_size and array[largest] < array[right_index]:
__snake_case = right_index
if largest != index:
__snake_case , __snake_case = array[largest], array[index]
heapify(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> list:
__snake_case = len(_UpperCAmelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
for i in range(n - 1 , 0 , -1 ):
__snake_case , __snake_case = array[0], array[i]
heapify(_UpperCAmelCase , 0 , _UpperCAmelCase )
return array
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
__snake_case = low
__snake_case = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
__snake_case , __snake_case = array[j], array[i]
i += 1
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> list:
if len(_UpperCAmelCase ) == 0:
return array
__snake_case = 2 * math.ceil(math.loga(len(_UpperCAmelCase ) ) )
__snake_case = 16
return intro_sort(_UpperCAmelCase , 0 , len(_UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : list , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_UpperCAmelCase )
max_depth -= 1
__snake_case = median_of_a(_UpperCAmelCase , _UpperCAmelCase , start + ((end - start) // 2) + 1 , end - 1 )
__snake_case = partition(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
intro_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = p
return insertion_sort(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Union[str, Any] = input('''Enter numbers separated by a comma : ''').strip()
a : Dict = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
def A__ ( ) -> Tuple:
__snake_case = 0
for i in range(1 , 10_01 ):
total += i**i
return str(_UpperCAmelCase )[-10:]
if __name__ == "__main__":
print(solution())
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" )
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int = 10 ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or n < 0:
raise ValueError("Invalid input" )
__snake_case = 10**n
__snake_case = 2_84_33 * (pow(2 , 7_83_04_57 , _UpperCAmelCase )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
| 717 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( _UpperCAmelCase : Dict ) -> int: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( ) -> Dict:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
__snake_case = [1, 2, 3]
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=2 )
with pytest.raises(_UpperCAmelCase ):
with parallel_backend("unsupported backend" ):
map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case = [1, 2]
__snake_case = {"a": 1, "b": 2}
__snake_case = {"a": [1, 2], "b": [3, 4]}
__snake_case = {"a": {"1": 1}, "b": 2}
__snake_case = {"a": 1, "b": 2, "c": 3, "d": 4}
__snake_case = [2, 3]
__snake_case = {"a": 2, "b": 3}
__snake_case = {"a": [2, 3], "b": [4, 5]}
__snake_case = {"a": {"1": 2}, "b": 3}
__snake_case = {"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
assert map_nested(_UpperCAmelCase , _UpperCAmelCase , num_proc=_UpperCAmelCase ) == expected_map_nested_sa
| 680 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
a : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Tuple , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : bool = True , a_ : Dict[str, int] = None , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = True , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = size if size is not None else {"shortest_edge": 224}
__snake_case = get_size_dict(a_ , default_to_square=a_ )
__snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224}
__snake_case = get_size_dict(a_ , default_to_square=a_ , param_name="crop_size" )
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_rescale
__snake_case = rescale_factor
__snake_case = do_normalize
__snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
__snake_case = do_convert_rgb
def A ( self : str , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BICUBIC , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , ):
"""simple docstring"""
__snake_case = get_size_dict(a_ , default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__snake_case = get_resize_output_image_size(a_ , size=size["shortest_edge"] , default_to_square=a_ )
return resize(a_ , size=a_ , resample=a_ , data_format=a_ , **a_ )
def A ( self : int , a_ : np.ndarray , a_ : Dict[str, int] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Any , ):
"""simple docstring"""
__snake_case = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(a_ , size=(size["height"], size["width"]) , data_format=a_ , **a_ )
def A ( self : str , a_ : np.ndarray , a_ : Union[int, float] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : Tuple , ):
"""simple docstring"""
return rescale(a_ , scale=a_ , data_format=a_ , **a_ )
def A ( self : Optional[Any] , a_ : np.ndarray , a_ : Union[float, List[float]] , a_ : Union[float, List[float]] , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : List[Any] , ):
"""simple docstring"""
return normalize(a_ , mean=a_ , std=a_ , data_format=a_ , **a_ )
def A ( self : Dict , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : int = None , a_ : bool = None , a_ : float = None , a_ : bool = None , a_ : Optional[Union[float, List[float]]] = None , a_ : Optional[Union[float, List[float]]] = None , a_ : bool = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **a_ : Any , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(a_ , param_name="size" , default_to_square=a_ )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
__snake_case = crop_size if crop_size is not None else self.crop_size
__snake_case = get_size_dict(a_ , param_name="crop_size" , default_to_square=a_ )
__snake_case = do_rescale if do_rescale is not None else self.do_rescale
__snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = image_mean if image_mean is not None else self.image_mean
__snake_case = image_std if image_std is not None else self.image_std
__snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__snake_case = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__snake_case = [convert_to_rgb(a_ ) for image in images]
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(a_ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_center_crop:
__snake_case = [self.center_crop(image=a_ , size=a_ ) for image in images]
if do_rescale:
__snake_case = [self.rescale(image=a_ , scale=a_ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=a_ , mean=a_ , std=a_ ) for image in images]
__snake_case = [to_channel_dimension_format(a_ , a_ ) for image in images]
__snake_case = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 718 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : int = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """mobilenet_v2"""
def __init__( self : Tuple , a_ : int=3 , a_ : int=224 , a_ : List[Any]=1.0 , a_ : List[str]=8 , a_ : Dict=8 , a_ : Optional[Any]=6 , a_ : Optional[Any]=32 , a_ : str=True , a_ : Union[str, Any]=True , a_ : List[Any]="relu6" , a_ : Optional[Any]=True , a_ : Any=0.8 , a_ : Dict=0.02 , a_ : Optional[int]=0.001 , a_ : Optional[int]=255 , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(**a_ )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
__snake_case = num_channels
__snake_case = image_size
__snake_case = depth_multiplier
__snake_case = depth_divisible_by
__snake_case = min_depth
__snake_case = expand_ratio
__snake_case = output_stride
__snake_case = first_layer_is_expansion
__snake_case = finegrained_output
__snake_case = hidden_act
__snake_case = tf_padding
__snake_case = classifier_dropout_prob
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def A ( self : int ):
"""simple docstring"""
return 1e-4
| 680 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """data2vec-text"""
def __init__( self : List[str] , a_ : str=30_522 , a_ : Optional[int]=768 , a_ : Dict=12 , a_ : int=12 , a_ : Dict=3_072 , a_ : Dict="gelu" , a_ : Optional[Any]=0.1 , a_ : List[str]=0.1 , a_ : int=512 , a_ : Any=2 , a_ : int=0.02 , a_ : Dict=1e-12 , a_ : Dict=1 , a_ : Any=0 , a_ : Dict=2 , a_ : Optional[int]="absolute" , a_ : List[Any]=True , a_ : Dict=None , **a_ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 680 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = MBartConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = """gelu"""
def __init__( self : Union[str, Any] , a_ : List[str] , a_ : str=13 , a_ : str=7 , a_ : Union[str, Any]=True , a_ : Dict=False , a_ : Optional[int]=99 , a_ : List[Any]=32 , a_ : Tuple=2 , a_ : List[str]=4 , a_ : Optional[Any]=37 , a_ : List[str]=0.1 , a_ : Tuple=0.1 , a_ : str=20 , a_ : Dict=2 , a_ : Any=1 , a_ : Any=0 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = seq_length
__snake_case = is_training
__snake_case = use_labels
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = eos_token_id
__snake_case = pad_token_id
__snake_case = bos_token_id
def A ( self : int ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__snake_case = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__snake_case = tf.concat([input_ids, eos_tensor] , axis=1 )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__snake_case = prepare_mbart_inputs_dict(a_ , a_ , a_ )
return config, inputs_dict
def A ( self : Any , a_ : int , a_ : str ):
"""simple docstring"""
__snake_case = TFMBartModel(config=a_ ).get_decoder()
__snake_case = inputs_dict["input_ids"]
__snake_case = input_ids[:1, :]
__snake_case = inputs_dict["attention_mask"][:1, :]
__snake_case = inputs_dict["head_mask"]
__snake_case = 1
# first forward pass
__snake_case = model(a_ , attention_mask=a_ , head_mask=a_ , use_cache=a_ )
__snake_case , __snake_case = outputs.to_tuple()
__snake_case = past_key_values[1]
def __UpperCAmelCase ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple=None , ) -> str:
if attention_mask is None:
__snake_case = tf.cast(tf.math.not_equal(_UpperCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__snake_case = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__snake_case = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__snake_case = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : int , a_ : Dict , a_ : Optional[Any] , a_ : str , a_ : Tuple , a_ : List[Any] ):
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = TFMBartModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__SCREAMING_SNAKE_CASE = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
__SCREAMING_SNAKE_CASE = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
__SCREAMING_SNAKE_CASE = """facebook/mbart-large-en-ro"""
@cached_property
def A ( self : str ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A ( self : Optional[Any] , **a_ : Optional[Any] ):
"""simple docstring"""
__snake_case = self.translate_src_text(**a_ )
self.assertListEqual(self.expected_text , a_ )
def A ( self : int , **a_ : Optional[int] ):
"""simple docstring"""
__snake_case = self.tokenizer(self.src_text , **a_ , return_tensors="tf" )
__snake_case = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__snake_case = self.tokenizer.batch_decode(a_ , skip_special_tokens=a_ )
return generated_words
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 720 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
| 680 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
a : Tuple = logging.getLogger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> int:
# save results
if os.path.exists(_UpperCAmelCase ):
if os.path.exists(os.path.join(_UpperCAmelCase , "config.json" ) ) and os.path.isfile(
os.path.join(_UpperCAmelCase , "config.json" ) ):
os.remove(os.path.join(_UpperCAmelCase , "config.json" ) )
if os.path.exists(os.path.join(_UpperCAmelCase , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(_UpperCAmelCase , "pytorch_model.bin" ) ):
os.remove(os.path.join(_UpperCAmelCase , "pytorch_model.bin" ) )
else:
os.makedirs(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any=False ) -> Any:
__snake_case = 2
if unlogit:
__snake_case = torch.pow(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = p * torch.log(_UpperCAmelCase )
__snake_case = 0
return -plogp.sum(dim=-1 )
def __UpperCAmelCase ( _UpperCAmelCase : Any ) -> List[str]:
logger.info("lv, h >\t" + "\t".join(F'''{x + 1}''' for x in range(len(_UpperCAmelCase ) ) ) )
for row in range(len(_UpperCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + "\t".join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : int=False ) -> Any:
__snake_case , __snake_case = model.config.num_hidden_layers, model.config.num_attention_heads
__snake_case = torch.zeros(_UpperCAmelCase , _UpperCAmelCase ).to(args.device )
__snake_case = torch.zeros(_UpperCAmelCase , _UpperCAmelCase ).to(args.device )
if head_mask is None:
__snake_case = torch.ones(_UpperCAmelCase , _UpperCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_UpperCAmelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__snake_case = None
__snake_case = 0.0
__snake_case = 0.0
for step, inputs in enumerate(tqdm(_UpperCAmelCase , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
__snake_case = tuple(t.to(args.device ) for t in inputs )
((__snake_case ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__snake_case = model(_UpperCAmelCase , labels=_UpperCAmelCase , head_mask=_UpperCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__snake_case , __snake_case , __snake_case = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_UpperCAmelCase ):
__snake_case = entropy(attn.detach() , _UpperCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_UpperCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__snake_case = 2
__snake_case = torch.pow(torch.pow(_UpperCAmelCase , _UpperCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0
if not args.dont_normalize_global_importance:
__snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(_UpperCAmelCase )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(_UpperCAmelCase )
logger.info("Head ranked by importance scores" )
__snake_case = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__snake_case = torch.arange(
head_importance.numel() , device=args.device )
__snake_case = head_ranks.view_as(_UpperCAmelCase )
print_ad_tensor(_UpperCAmelCase )
return attn_entropy, head_importance, total_loss
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> List[Any]:
__snake_case , __snake_case , __snake_case = compute_heads_importance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase )
__snake_case = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , _UpperCAmelCase , original_score * args.masking_threshold )
__snake_case = torch.ones_like(_UpperCAmelCase )
__snake_case = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__snake_case = original_score
while current_score >= original_score * args.masking_threshold:
__snake_case = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__snake_case = float("Inf" )
__snake_case = head_importance.view(-1 ).sort()[1]
if len(_UpperCAmelCase ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
__snake_case = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
__snake_case = new_head_mask.view(-1 )
__snake_case = 0.0
__snake_case = new_head_mask.view_as(_UpperCAmelCase )
__snake_case = new_head_mask.clone().detach()
print_ad_tensor(_UpperCAmelCase )
# Compute metric and head importance again
__snake_case , __snake_case , __snake_case = compute_heads_importance(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , _UpperCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info("Final head mask" )
print_ad_tensor(_UpperCAmelCase )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> str:
__snake_case = datetime.now()
__snake_case , __snake_case , __snake_case = compute_heads_importance(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase , compute_importance=_UpperCAmelCase , head_mask=_UpperCAmelCase )
__snake_case = 1 / loss
__snake_case = datetime.now() - before_time
__snake_case = sum(p.numel() for p in model.parameters() )
__snake_case = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_UpperCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = [
v,
]
assert sum(len(_UpperCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_UpperCAmelCase )
__snake_case = sum(p.numel() for p in model.parameters() )
__snake_case = datetime.now()
__snake_case , __snake_case , __snake_case = compute_heads_importance(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , compute_entropy=_UpperCAmelCase , compute_importance=_UpperCAmelCase , head_mask=_UpperCAmelCase , actually_pruned=_UpperCAmelCase , )
__snake_case = 1 / loss
__snake_case = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , _UpperCAmelCase , _UpperCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , _UpperCAmelCase , _UpperCAmelCase )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 1_00 )
save_model(_UpperCAmelCase , args.output_dir )
def __UpperCAmelCase ( ) -> str:
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=_UpperCAmelCase , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=_UpperCAmelCase , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=_UpperCAmelCase , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=_UpperCAmelCase , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=_UpperCAmelCase , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=_UpperCAmelCase , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=1_28 , type=_UpperCAmelCase , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=_UpperCAmelCase , help="Batch size." )
parser.add_argument("--seed" , type=_UpperCAmelCase , default=42 )
parser.add_argument("--local_rank" , type=_UpperCAmelCase , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=_UpperCAmelCase , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=_UpperCAmelCase , default="" , help="Can be used for distant debugging." )
__snake_case = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__snake_case = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
__snake_case = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__snake_case = torch.device("cuda" , args.local_rank )
__snake_case = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__snake_case = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__snake_case = nn.parallel.DistributedDataParallel(
_UpperCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_UpperCAmelCase )
elif args.n_gpu > 1:
__snake_case = nn.DataParallel(_UpperCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_UpperCAmelCase )
torch.save(_UpperCAmelCase , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , _UpperCAmelCase )
# Prepare dataset
__snake_case = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__snake_case = (torch.from_numpy(_UpperCAmelCase ),)
__snake_case = TensorDataset(*_UpperCAmelCase )
__snake_case = RandomSampler(_UpperCAmelCase )
__snake_case = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__snake_case = mask_heads(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
prune_heads(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 721 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
def __init__( self : str , a_ : Tuple , a_ : Optional[Any]=2 , a_ : str=32 , a_ : Dict=16 , a_ : List[str]=3 , a_ : Dict=True , a_ : Optional[int]=True , a_ : List[str]=32 , a_ : int=4 , a_ : str=[0, 1, 2, 3] , a_ : Any=4 , a_ : Optional[int]=37 , a_ : Any="gelu" , a_ : Optional[int]=0.1 , a_ : Optional[Any]=0.1 , a_ : Union[str, Any]=0.02 , a_ : Union[str, Any]=3 , a_ : Any=[1, 384, 24, 24] , a_ : Optional[Any]=True , a_ : Optional[int]=None , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = backbone_out_indices
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = initializer_range
__snake_case = num_labels
__snake_case = backbone_featmap_shape
__snake_case = scope
__snake_case = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__snake_case = (image_size // patch_size) ** 2
__snake_case = num_patches + 1
def A ( self : int ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=a_ , backbone_featmap_shape=self.backbone_featmap_shape , )
def A ( self : int , a_ : Union[str, Any] , a_ : List[str] , a_ : List[str] ):
"""simple docstring"""
__snake_case = DPTModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForDepthEstimation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def A ( self : Optional[Any] , a_ : List[str] , a_ : int , a_ : Tuple ):
"""simple docstring"""
__snake_case = self.num_labels
__snake_case = DPTForSemanticSegmentation(a_ )
model.to(a_ )
model.eval()
__snake_case = model(a_ , labels=a_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = DPTModelTester(self )
__snake_case = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def A ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ , nn.Linear ) )
def A ( self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a_ )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
if model_class in get_values(a_ ):
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : int ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = False
__snake_case = True
if model_class in get_values(a_ ) or not model_class.supports_gradient_checkpointing:
continue
__snake_case = model_class(a_ )
model.to(a_ )
model.gradient_checkpointing_enable()
model.train()
__snake_case = self._prepare_for_class(a_ , a_ , return_labels=a_ )
__snake_case = model(**a_ ).loss
loss.backward()
def A ( self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(a_ )
for model_class in self.all_model_classes:
__snake_case = model_class(config=a_ )
# Skip the check for the backbone
__snake_case = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A ( self : Tuple ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__snake_case = DPTModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def A ( self : int ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = "add"
with self.assertRaises(a_ ):
__snake_case = DPTForDepthEstimation(a_ )
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
__snake_case = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(a_ )
__snake_case = prepare_img()
__snake_case = image_processor(images=a_ , return_tensors="pt" ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case = model(**a_ )
__snake_case = outputs.predicted_depth
# verify the predicted depth
__snake_case = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape , a_ )
__snake_case = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(a_ )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , a_ , atol=1e-4 ) )
| 680 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : str , a_ : Any , a_ : List[str]=7 , a_ : Union[str, Any]=3 , a_ : Any=18 , a_ : Tuple=30 , a_ : str=400 , a_ : List[str]=True , a_ : Any=None , a_ : List[Any]=True , a_ : Dict=None , a_ : Optional[Any]=True , a_ : List[str]=[0.5, 0.5, 0.5] , a_ : List[Any]=[0.5, 0.5, 0.5] , a_ : List[Any]=False , ):
"""simple docstring"""
__snake_case = size if size is not None else {"height": 20, "width": 20}
__snake_case = crop_size if crop_size is not None else {"height": 18, "width": 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_center_crop
__snake_case = crop_size
__snake_case = do_normalize
__snake_case = image_mean
__snake_case = image_std
__snake_case = do_reduce_labels
def A ( self : Optional[int] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCAmelCase ( ) -> List[Any]:
__snake_case = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__snake_case = Image.open(dataset[0]["file"] )
__snake_case = Image.open(dataset[1]["file"] )
return image, map
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
__snake_case = Image.open(ds[0]["file"] )
__snake_case = Image.open(ds[1]["file"] )
__snake_case = Image.open(ds[2]["file"] )
__snake_case = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = BeitImageProcessor if is_vision_available() else None
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = BeitImageProcessingTester(self )
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A ( self : str ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "do_center_crop" ) )
self.assertTrue(hasattr(a_ , "center_crop" ) )
self.assertTrue(hasattr(a_ , "do_normalize" ) )
self.assertTrue(hasattr(a_ , "image_mean" ) )
self.assertTrue(hasattr(a_ , "image_std" ) )
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels , a_ )
__snake_case = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=a_ )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels , a_ )
def A ( self : Any ):
"""simple docstring"""
pass
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__snake_case = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
__snake_case = []
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
__snake_case = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
__snake_case = image_processing(a_ , a_ , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
__snake_case , __snake_case = prepare_semantic_single_inputs()
__snake_case = image_processing(a_ , a_ , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
__snake_case , __snake_case = prepare_semantic_batch_inputs()
__snake_case = image_processing(a_ , a_ , return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__snake_case , __snake_case = prepare_semantic_single_inputs()
__snake_case = image_processing(a_ , a_ , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
__snake_case = True
__snake_case = image_processing(a_ , a_ , return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
| 700 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class SCREAMING_SNAKE_CASE__ :
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
def A ( self : Any ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(a_ ) for k, v in self.__dict__.items()} )
| 680 | 0 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
__snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
a : Optional[Any] = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = generator.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case = "A painting of a squirrel eating a burger "
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , generator=a_ , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
__snake_case = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 680 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a : Any = logging.get_logger(__name__)
a : int = {
'''facebook/xlm-roberta-xl''': '''https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json''',
'''facebook/xlm-roberta-xxl''': '''https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json''',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """xlm-roberta-xl"""
def __init__( self : List[str] , a_ : Optional[Any]=250_880 , a_ : Any=2_560 , a_ : Dict=36 , a_ : Union[str, Any]=32 , a_ : List[str]=10_240 , a_ : Tuple="gelu" , a_ : Optional[int]=0.1 , a_ : str=0.1 , a_ : str=514 , a_ : Union[str, Any]=1 , a_ : str=0.02 , a_ : List[str]=1e-05 , a_ : List[Any]=1 , a_ : Any=0 , a_ : Union[str, Any]=2 , a_ : str="absolute" , a_ : int=True , a_ : Any=None , **a_ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
__snake_case = vocab_size
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = hidden_act
__snake_case = intermediate_size
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = max_position_embeddings
__snake_case = type_vocab_size
__snake_case = initializer_range
__snake_case = layer_norm_eps
__snake_case = position_embedding_type
__snake_case = use_cache
__snake_case = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
__snake_case = {0: "batch", 1: "choice", 2: "sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 702 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
| 680 | 0 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
a : Any = get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any]=0 ) -> Any:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if accelerator.process_index == 0:
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving model to {output_model_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Model saved to {output_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving model to {ckpt_dir}''' )
__snake_case = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=_UpperCAmelCase , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Model saved to {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : str=0 ) -> List[str]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(_UpperCAmelCase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
__snake_case = F'''{MODEL_NAME}.bin''' if model_index == 0 else F'''{MODEL_NAME}_{model_index}.bin'''
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__snake_case = (
F'''{MODEL_NAME}_rank{accelerator.process_index}.bin'''
if model_index == 0
else F'''{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading model from {input_model_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Model loaded from {input_model_file}''' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{MODEL_NAME}_{model_index}''' )
if F'''{MODEL_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading model from {ckpt_dir}''' )
__snake_case = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=_UpperCAmelCase , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , planner=DefaultLoadPlanner() , )
__snake_case = state_dict["model"]
logger.info(F'''Model loaded from {ckpt_dir}''' )
model.load_state_dict(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=0 ) -> Union[str, Any]:
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__snake_case = FSDP.optim_state_dict(_UpperCAmelCase , _UpperCAmelCase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {output_optimizer_file}''' )
torch.save(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Optimizer state saved in {output_optimizer_file}''' )
else:
__snake_case = os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
logger.info(F'''Saving Optimizer state to {ckpt_dir}''' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(_UpperCAmelCase ) , planner=DefaultSavePlanner() , )
logger.info(F'''Optimizer state saved in {ckpt_dir}''' )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=0 ) -> Union[str, Any]:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
_UpperCAmelCase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__snake_case = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__snake_case = (
F'''{OPTIMIZER_NAME}.bin''' if optimizer_index == 0 else F'''{OPTIMIZER_NAME}_{optimizer_index}.bin'''
)
__snake_case = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
logger.info(F'''Loading Optimizer state from {input_optimizer_file}''' )
__snake_case = torch.load(_UpperCAmelCase )
logger.info(F'''Optimizer state loaded from {input_optimizer_file}''' )
else:
__snake_case = (
os.path.join(_UpperCAmelCase , F'''{OPTIMIZER_NAME}_{optimizer_index}''' )
if F'''{OPTIMIZER_NAME}''' not in input_dir
else input_dir
)
logger.info(F'''Loading Optimizer from {ckpt_dir}''' )
__snake_case = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(_UpperCAmelCase ) , )
__snake_case = optim_state["optimizer"]
logger.info(F'''Optimizer loaded from {ckpt_dir}''' )
__snake_case = FSDP.optim_state_dict_to_load(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
optimizer.load_state_dict(_UpperCAmelCase )
| 703 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__snake_case = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def __UpperCAmelCase ( _UpperCAmelCase : list[int] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int ) -> list[int]:
__snake_case = [0] * no_of_processes
__snake_case = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_UpperCAmelCase ):
__snake_case = burst_time[i]
__snake_case = 0
__snake_case = 0
__snake_case = 9_99_99_99_99
__snake_case = 0
__snake_case = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_UpperCAmelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__snake_case = remaining_time[j]
__snake_case = j
__snake_case = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__snake_case = remaining_time[short]
if minm == 0:
__snake_case = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
__snake_case = False
# Find finish time of current process
__snake_case = increment_time + 1
# Calculate waiting time
__snake_case = finish_time - arrival_time[short]
__snake_case = finar - burst_time[short]
if waiting_time[short] < 0:
__snake_case = 0
# Increment time
increment_time += 1
return waiting_time
def __UpperCAmelCase ( _UpperCAmelCase : list[int] , _UpperCAmelCase : int , _UpperCAmelCase : list[int] ) -> list[int]:
__snake_case = [0] * no_of_processes
for i in range(_UpperCAmelCase ):
__snake_case = burst_time[i] + waiting_time[i]
return turn_around_time
def __UpperCAmelCase ( _UpperCAmelCase : list[int] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int ) -> None:
__snake_case = 0
__snake_case = 0
for i in range(_UpperCAmelCase ):
__snake_case = total_waiting_time + waiting_time[i]
__snake_case = total_turn_around_time + turn_around_time[i]
print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many process you want to analyze''')
a : str = int(input())
a : Any = [0] * no_of_processes
a : Any = [0] * no_of_processes
a : List[str] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
a : int = map(int, input().split())
a : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a : Optional[int] = burst_time
a : List[Any] = no_of_processes
a : List[Any] = waiting_time
a : Tuple = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a : Dict = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the dataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 704 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> str:
if number > 0:
raise ValueError("input must be a negative integer" )
__snake_case = len(bin(_UpperCAmelCase )[3:] )
__snake_case = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
__snake_case = (
(
"1"
+ "0" * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a : List[Any] = logging.get_logger(__name__)
a : Dict = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
a : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Optional[int]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__snake_case = model_type_to_module_name(_UpperCAmelCase )
__snake_case = importlib.import_module(F'''.{module_name}''' , "transformers.models" )
try:
return getattr(_UpperCAmelCase , _UpperCAmelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_UpperCAmelCase , "__name__" , _UpperCAmelCase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__snake_case = importlib.import_module("transformers" )
if hasattr(_UpperCAmelCase , _UpperCAmelCase ):
return getattr(_UpperCAmelCase , _UpperCAmelCase )
return None
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, os.PathLike] , _UpperCAmelCase : Optional[Union[str, os.PathLike]] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[Dict[str, str]] = None , _UpperCAmelCase : Optional[Union[bool, str]] = None , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = False , **_UpperCAmelCase : Optional[Any] , ) -> Tuple:
__snake_case = get_file_from_repo(
_UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , )
if resolved_config_file is None:
logger.info(
"Could not locate the image processor configuration file, will try to use the model config instead." )
return {}
with open(_UpperCAmelCase , encoding="utf-8" ) as reader:
return json.load(_UpperCAmelCase )
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[int] ):
"""simple docstring"""
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
@replace_list_option_in_docstrings(a_ )
def A ( cls : int , a_ : Optional[Any] , **a_ : int ):
"""simple docstring"""
__snake_case = kwargs.pop("config" , a_ )
__snake_case = kwargs.pop("trust_remote_code" , a_ )
__snake_case = True
__snake_case , __snake_case = ImageProcessingMixin.get_image_processor_dict(a_ , **a_ )
__snake_case = config_dict.get("image_processor_type" , a_ )
__snake_case = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
__snake_case = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__snake_case = config_dict.pop("feature_extractor_type" , a_ )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
__snake_case = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
__snake_case = config_dict["auto_map"]["AutoFeatureExtractor"]
__snake_case = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(a_ , a_ ):
__snake_case = AutoConfig.from_pretrained(a_ , **a_ )
# It could be in `config.image_processor_type``
__snake_case = getattr(a_ , "image_processor_type" , a_ )
if hasattr(a_ , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
__snake_case = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
__snake_case = image_processor_class_from_name(a_ )
__snake_case = image_processor_auto_map is not None
__snake_case = image_processor_class is not None or type(a_ ) in IMAGE_PROCESSOR_MAPPING
__snake_case = resolve_trust_remote_code(
a_ , a_ , a_ , a_ )
if has_remote_code and trust_remote_code:
__snake_case = get_class_from_dynamic_module(
a_ , a_ , **a_ )
__snake_case = kwargs.pop("code_revision" , a_ )
if os.path.isdir(a_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(a_ , **a_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(a_ , **a_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(a_ ) in IMAGE_PROCESSOR_MAPPING:
__snake_case = IMAGE_PROCESSOR_MAPPING[type(a_ )]
return image_processor_class.from_dict(a_ , **a_ )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def A ( a_ : List[Any] , a_ : List[Any] ):
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(a_ , a_ )
| 705 |
'''simple docstring'''
from timeit import timeit
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
number &= number - 1
result += 1
return result
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> int:
if number < 0:
raise ValueError("the value of input must not be negative" )
__snake_case = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __UpperCAmelCase ( ) -> None:
def do_benchmark(_UpperCAmelCase : int ) -> None:
__snake_case = "import __main__ as z"
print(F'''Benchmark when {number = }:''' )
print(F'''{get_set_bits_count_using_modulo_operator(_UpperCAmelCase ) = }''' )
__snake_case = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=_UpperCAmelCase )
print(F'''timeit() runs in {timing} seconds''' )
print(F'''{get_set_bits_count_using_brian_kernighans_algorithm(_UpperCAmelCase ) = }''' )
__snake_case = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=_UpperCAmelCase , )
print(F'''timeit() runs in {timing} seconds''' )
for number in (25, 37, 58, 0):
do_benchmark(_UpperCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 680 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Tuple ):
"""simple docstring"""
__snake_case = tempfile.mkdtemp()
__snake_case = SamImageProcessor()
__snake_case = SamProcessor(a_ )
processor.save_pretrained(self.tmpdirname )
def A ( self : int , **a_ : Dict ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).image_processor
def A ( self : Tuple ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : Dict ):
"""simple docstring"""
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : str ):
"""simple docstring"""
__snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
__snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = SamProcessor(image_processor=a_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(a_ , return_tensors="np" )
__snake_case = processor(images=a_ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def A ( self : Dict ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = SamProcessor(image_processor=a_ )
__snake_case = [torch.ones((1, 3, 5, 5) )]
__snake_case = [[1_764, 2_646]]
__snake_case = [[683, 1_024]]
__snake_case = processor.post_process_masks(a_ , a_ , a_ )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__snake_case = processor.post_process_masks(
a_ , torch.tensor(a_ ) , torch.tensor(a_ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
__snake_case = [np.ones((1, 3, 5, 5) )]
__snake_case = processor.post_process_masks(a_ , np.array(a_ ) , np.array(a_ ) )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__snake_case = [[1, 0], [0, 1]]
with self.assertRaises(a_ ):
__snake_case = processor.post_process_masks(a_ , np.array(a_ ) , np.array(a_ ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = tempfile.mkdtemp()
__snake_case = SamImageProcessor()
__snake_case = SamProcessor(a_ )
processor.save_pretrained(self.tmpdirname )
def A ( self : Optional[int] , **a_ : Union[str, Any] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).image_processor
def A ( self : List[str] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : int ):
"""simple docstring"""
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case = self.get_image_processor(do_normalize=a_ , padding_value=1.0 )
__snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a_ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = SamProcessor(image_processor=a_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(a_ , return_tensors="np" )
__snake_case = processor(images=a_ , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def A ( self : str ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = SamProcessor(image_processor=a_ )
__snake_case = [tf.ones((1, 3, 5, 5) )]
__snake_case = [[1_764, 2_646]]
__snake_case = [[683, 1_024]]
__snake_case = processor.post_process_masks(a_ , a_ , a_ , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__snake_case = processor.post_process_masks(
a_ , tf.convert_to_tensor(a_ ) , tf.convert_to_tensor(a_ ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
# should also work with np
__snake_case = [np.ones((1, 3, 5, 5) )]
__snake_case = processor.post_process_masks(
a_ , np.array(a_ ) , np.array(a_ ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 1_764, 2_646) )
__snake_case = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__snake_case = processor.post_process_masks(
a_ , np.array(a_ ) , np.array(a_ ) , return_tensors="tf" )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = tempfile.mkdtemp()
__snake_case = SamImageProcessor()
__snake_case = SamProcessor(a_ )
processor.save_pretrained(self.tmpdirname )
def A ( self : int , **a_ : List[str] ):
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **a_ ).image_processor
def A ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : Any ):
"""simple docstring"""
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case = [Image.fromarray(np.moveaxis(a_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = SamProcessor(image_processor=a_ )
__snake_case = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__snake_case = [tf.convert_to_tensor(a_ )]
__snake_case = [torch.tensor(a_ )]
__snake_case = [[1_764, 2_646]]
__snake_case = [[683, 1_024]]
__snake_case = processor.post_process_masks(
a_ , a_ , a_ , return_tensors="tf" )
__snake_case = processor.post_process_masks(
a_ , a_ , a_ , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case = self.get_image_processor()
__snake_case = SamProcessor(image_processor=a_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(a_ , return_tensors="pt" )["pixel_values"].numpy()
__snake_case = processor(images=a_ , return_tensors="pt" )["pixel_values"].numpy()
__snake_case = image_processor(a_ , return_tensors="tf" )["pixel_values"].numpy()
__snake_case = processor(images=a_ , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertTrue(np.allclose(a_ , a_ ) )
self.assertTrue(np.allclose(a_ , a_ ) )
| 706 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
a : Dict = '''sshleifer/bart-tiny-random'''
a : str = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def A ( self : Union[str, Any] ):
"""simple docstring"""
return AutoConfig.from_pretrained(a_ )
def A ( self : str ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def A ( self : Optional[Any] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
def A ( self : Dict ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=a_ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def A ( self : Optional[int] ):
"""simple docstring"""
__snake_case , *__snake_case = create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def A ( self : Dict ):
"""simple docstring"""
with self.assertRaises(a_ ):
create_student_by_copying_alternating_layers(a_ , tempfile.mkdtemp() , e=a_ , d=a_ )
| 680 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
a : List[str] = logging.get_logger(__name__)
a : Optional[int] = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """bloom"""
__SCREAMING_SNAKE_CASE = ["""past_key_values"""]
__SCREAMING_SNAKE_CASE = {
"""num_hidden_layers""": """n_layer""",
"""num_attention_heads""": """n_head""",
}
def __init__( self : List[str] , a_ : Dict=250_880 , a_ : List[Any]=64 , a_ : int=2 , a_ : int=8 , a_ : Optional[Any]=1e-5 , a_ : Dict=0.02 , a_ : Union[str, Any]=True , a_ : Optional[int]=1 , a_ : int=2 , a_ : Dict=False , a_ : Tuple=0.0 , a_ : str=0.0 , a_ : int=1 , a_ : List[Any]=False , **a_ : Any , ):
"""simple docstring"""
__snake_case = vocab_size
# Backward compatibility with n_embed kwarg
__snake_case = kwargs.pop("n_embed" , a_ )
__snake_case = hidden_size if n_embed is None else n_embed
__snake_case = n_layer
__snake_case = n_head
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = use_cache
__snake_case = pretraining_tp
__snake_case = apply_residual_connection_post_layernorm
__snake_case = hidden_dropout
__snake_case = attention_dropout
__snake_case = bos_token_id
__snake_case = eos_token_id
__snake_case = slow_but_exact
super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = version.parse("""1.12""" )
def __init__( self : Optional[Any] , a_ : PretrainedConfig , a_ : str = "default" , a_ : List[PatchingSpec] = None , a_ : bool = False , ):
"""simple docstring"""
super().__init__(a_ , task=a_ , patching_specs=a_ , use_past=a_ )
if not getattr(self._config , "pad_token_id" , a_ ):
# TODO: how to do that better?
__snake_case = 0
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(a_ , direction="inputs" , inverted_values_shape=a_ )
__snake_case = {0: "batch", 1: "past_sequence + sequence"}
else:
__snake_case = {0: "batch", 1: "sequence"}
return common_inputs
@property
def A ( self : Optional[Any] ):
"""simple docstring"""
return self._config.n_layer
@property
def A ( self : Optional[Any] ):
"""simple docstring"""
return self._config.n_head
@property
def A ( self : Optional[Any] ):
"""simple docstring"""
return 1e-3
def A ( self : Union[str, Any] , a_ : "PreTrainedTokenizer" , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional["TensorType"] = None , ):
"""simple docstring"""
__snake_case = super(a_ , self ).generate_dummy_inputs(
a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ )
# We need to order the input in the way they appears in the forward()
__snake_case = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case , __snake_case = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case = self._config.hidden_size // self.num_attention_heads
__snake_case = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__snake_case = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__snake_case = [
(torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers )
]
__snake_case = common_inputs["attention_mask"]
if self.use_past:
__snake_case = ordered_inputs["attention_mask"].dtype
__snake_case = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 )
return ordered_inputs
@property
def A ( self : Tuple ):
"""simple docstring"""
return 13
| 707 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
a : Any = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = """sequence-classification"""
def __init__( self : List[str] , a_ : str ):
"""simple docstring"""
if type(a_ ) == dict:
__snake_case = Namespace(**a_ )
__snake_case = glue_output_modes[hparams.task]
__snake_case = glue_tasks_num_labels[hparams.task]
super().__init__(a_ , a_ , self.mode )
def A ( self : Union[str, Any] , **a_ : List[Any] ):
"""simple docstring"""
return self.model(**a_ )
def A ( self : int , a_ : Optional[Any] , a_ : int ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case = outputs[0]
__snake_case = self.trainer.lr_schedulers[0]["scheduler"]
__snake_case = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : List[str] ):
"""simple docstring"""
__snake_case = self.hparams
__snake_case = processors[args.task]()
__snake_case = processor.get_labels()
for mode in ["train", "dev"]:
__snake_case = self._feature_file(a_ )
if os.path.exists(a_ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , a_ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
__snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
__snake_case = convert_examples_to_features(
a_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , a_ )
torch.save(a_ , a_ )
def A ( self : Optional[int] , a_ : str , a_ : int , a_ : bool = False ):
"""simple docstring"""
__snake_case = "dev" if mode == "test" else mode
__snake_case = self._feature_file(a_ )
logger.info("Loading features from cached file %s" , a_ )
__snake_case = torch.load(a_ )
__snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a_ , a_ , a_ , a_ ) , batch_size=a_ , shuffle=a_ , )
def A ( self : int , a_ : List[str] , a_ : Tuple ):
"""simple docstring"""
__snake_case = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__snake_case = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
__snake_case = self(**a_ )
__snake_case , __snake_case = outputs[:2]
__snake_case = logits.detach().cpu().numpy()
__snake_case = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Dict , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
__snake_case = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__snake_case = np.argmax(a_ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__snake_case = np.squeeze(a_ )
__snake_case = np.concatenate([x["target"] for x in outputs] , axis=0 )
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , a_ , a_ )}
__snake_case = dict(results.items() )
__snake_case = results
return ret, preds_list, out_label_list
def A ( self : Tuple , a_ : list ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : int , a_ : Tuple ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = self._eval_end(a_ )
__snake_case = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def A ( a_ : str , a_ : Any ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(a_ , a_ )
parser.add_argument(
"--max_seq_length" , default=128 , type=a_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=a_ , required=a_ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=a_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def __UpperCAmelCase ( ) -> Union[str, Any]:
__snake_case = argparse.ArgumentParser()
add_generic_args(_UpperCAmelCase , os.getcwd() )
__snake_case = GLUETransformer.add_model_specific_args(_UpperCAmelCase , os.getcwd() )
__snake_case = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__snake_case = os.path.join(
"./results" , F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , )
os.makedirs(args.output_dir )
__snake_case = GLUETransformer(_UpperCAmelCase )
__snake_case = generic_train(_UpperCAmelCase , _UpperCAmelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__snake_case = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCAmelCase ) )
__snake_case = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : List[Any] = logging.get_logger(__name__)
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> List[str]:
__snake_case = b.T
__snake_case = np.sum(np.square(_UpperCAmelCase ) , axis=1 )
__snake_case = np.sum(np.square(_UpperCAmelCase ) , axis=0 )
__snake_case = np.matmul(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = aa[:, None] - 2 * ab + ba[None, :]
return d
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
__snake_case = x.reshape(-1 , 3 )
__snake_case = squared_euclidean_distance(_UpperCAmelCase , _UpperCAmelCase )
return np.argmin(_UpperCAmelCase , axis=1 )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Union[str, Any] , a_ : Optional[Union[List[List[int]], np.ndarray]] = None , a_ : bool = True , a_ : Dict[str, int] = None , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : bool = True , a_ : bool = True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(**a_ )
__snake_case = size if size is not None else {"height": 256, "width": 256}
__snake_case = get_size_dict(a_ )
__snake_case = np.array(a_ ) if clusters is not None else None
__snake_case = do_resize
__snake_case = size
__snake_case = resample
__snake_case = do_normalize
__snake_case = do_color_quantize
def A ( self : List[Any] , a_ : np.ndarray , a_ : Dict[str, int] , a_ : PILImageResampling = PILImageResampling.BILINEAR , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : int , ):
"""simple docstring"""
__snake_case = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''' )
return resize(
a_ , size=(size["height"], size["width"]) , resample=a_ , data_format=a_ , **a_ )
def A ( self : int , a_ : np.ndarray , a_ : Optional[Union[str, ChannelDimension]] = None , ):
"""simple docstring"""
__snake_case = rescale(image=a_ , scale=1 / 127.5 , data_format=a_ )
__snake_case = image - 1
return image
def A ( self : Optional[Any] , a_ : ImageInput , a_ : bool = None , a_ : Dict[str, int] = None , a_ : PILImageResampling = None , a_ : bool = None , a_ : Optional[bool] = None , a_ : Optional[Union[List[List[int]], np.ndarray]] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **a_ : List[str] , ):
"""simple docstring"""
__snake_case = do_resize if do_resize is not None else self.do_resize
__snake_case = size if size is not None else self.size
__snake_case = get_size_dict(a_ )
__snake_case = resample if resample is not None else self.resample
__snake_case = do_normalize if do_normalize is not None else self.do_normalize
__snake_case = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
__snake_case = clusters if clusters is not None else self.clusters
__snake_case = np.array(a_ )
__snake_case = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True." )
# All transformations expect numpy arrays.
__snake_case = [to_numpy_array(a_ ) for image in images]
if do_resize:
__snake_case = [self.resize(image=a_ , size=a_ , resample=a_ ) for image in images]
if do_normalize:
__snake_case = [self.normalize(image=a_ ) for image in images]
if do_color_quantize:
__snake_case = [to_channel_dimension_format(a_ , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
__snake_case = np.array(a_ )
__snake_case = color_quantize(a_ , a_ ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
__snake_case = images.shape[0]
__snake_case = images.reshape(a_ , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
__snake_case = list(a_ )
else:
__snake_case = [to_channel_dimension_format(a_ , a_ ) for image in images]
__snake_case = {"input_ids": images}
return BatchFeature(data=a_ , tensor_type=a_ )
| 708 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("input_in_memory_max_size" , ["default", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , "IN_MEMORY_MAX_SIZE" , _UpperCAmelCase )
__snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
__snake_case = dataset_size < in_memory_max_size
else:
__snake_case = False
__snake_case = is_small_dataset(_UpperCAmelCase )
assert result == expected
| 680 | 0 |
'''simple docstring'''
from math import sqrt
def __UpperCAmelCase ( _UpperCAmelCase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __UpperCAmelCase ( _UpperCAmelCase : int = 1_00_01 ) -> int:
__snake_case = 0
__snake_case = 1
while count != nth and number < 3:
number += 1
if is_prime(_UpperCAmelCase ):
count += 1
while count != nth:
number += 2
if is_prime(_UpperCAmelCase ):
count += 1
return number
if __name__ == "__main__":
print(F'''{solution() = }''')
| 709 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def __UpperCAmelCase ( _UpperCAmelCase : float ) -> float:
if edge <= 0 or not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError("Length must be a positive." )
return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : int ):
"""simple docstring"""
__snake_case = n
__snake_case = [None] * self.n
__snake_case = 0 # index of the first element
__snake_case = 0
__snake_case = 0
def __len__( self : Optional[Any] ):
"""simple docstring"""
return self.size
def A ( self : Optional[int] ):
"""simple docstring"""
return self.size == 0
def A ( self : int ):
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def A ( self : Any , a_ : List[str] ):
"""simple docstring"""
if self.size >= self.n:
raise Exception("QUEUE IS FULL" )
__snake_case = data
__snake_case = (self.rear + 1) % self.n
self.size += 1
return self
def A ( self : Dict ):
"""simple docstring"""
if self.size == 0:
raise Exception("UNDERFLOW" )
__snake_case = self.array[self.front]
__snake_case = None
__snake_case = (self.front + 1) % self.n
self.size -= 1
return temp
| 710 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
a : Any = 6_378_137.0
a : List[Any] = 6_356_752.314_245
a : Dict = 6_378_137
def __UpperCAmelCase ( _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : float ) -> float:
__snake_case = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
__snake_case = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
__snake_case = haversine_distance(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
__snake_case = (b_lata + b_lata) / 2
__snake_case = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
__snake_case = (sin(_UpperCAmelCase ) ** 2) * (cos(_UpperCAmelCase ) ** 2)
__snake_case = cos(sigma / 2 ) ** 2
__snake_case = (sigma - sin(_UpperCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
__snake_case = (cos(_UpperCAmelCase ) ** 2) * (sin(_UpperCAmelCase ) ** 2)
__snake_case = sin(sigma / 2 ) ** 2
__snake_case = (sigma + sin(_UpperCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
self.assertIsInstance(a_ , a_ )
def A ( self : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = Path(a_ ) / "preprocessor_config.json"
__snake_case = Path(a_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
__snake_case = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = Path(a_ ) / "preprocessor_config.json"
__snake_case = Path(a_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
__snake_case = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__snake_case = Path(a_ ) / "preprocessor_config.json"
__snake_case = Path(a_ ) / "config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__snake_case = AutoImageProcessor.from_pretrained(a_ ).to_dict()
config_dict.pop("image_processor_type" )
__snake_case = CLIPImageProcessor(**a_ )
# save in new folder
model_config.save_pretrained(a_ )
config.save_pretrained(a_ )
__snake_case = AutoImageProcessor.from_pretrained(a_ )
# make sure private variable is not incorrectly saved
__snake_case = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(a_ , a_ )
def A ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = Path(a_ ) / "preprocessor_config.json"
json.dump(
{"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
__snake_case = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
def A ( self : int ):
"""simple docstring"""
with self.assertRaisesRegex(
a_ , "clip-base is not a local folder and is not a valid model identifier" ):
__snake_case = AutoImageProcessor.from_pretrained("clip-base" )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
a_ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__snake_case = AutoImageProcessor.from_pretrained(a_ , revision="aaaaaa" )
def A ( self : Optional[int] ):
"""simple docstring"""
with self.assertRaisesRegex(
a_ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
__snake_case = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )
def A ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ):
__snake_case = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a_ ):
__snake_case = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
__snake_case = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
__snake_case = AutoImageProcessor.from_pretrained(a_ , trust_remote_code=a_ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )
def A ( self : str ):
"""simple docstring"""
try:
AutoConfig.register("custom" , a_ )
AutoImageProcessor.register(a_ , a_ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a_ ):
AutoImageProcessor.register(a_ , a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = Path(a_ ) / "preprocessor_config.json"
__snake_case = Path(a_ ) / "config.json"
json.dump(
{"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(a_ , "w" ) , )
json.dump({"model_type": "clip"} , open(a_ , "w" ) )
__snake_case = CustomImageProcessor.from_pretrained(a_ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a_ )
__snake_case = AutoImageProcessor.from_pretrained(a_ )
self.assertIsInstance(a_ , a_ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A ( self : str ):
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = True
try:
AutoConfig.register("custom" , a_ )
AutoImageProcessor.register(a_ , a_ )
# If remote code is not set, the default is to use local
__snake_case = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__snake_case = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__snake_case = AutoImageProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=a_ )
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
self.assertTrue(not hasattr(a_ , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ) -> np.ndarray:
# For applying gaussian function for each element in matrix.
__snake_case = math.sqrt(_UpperCAmelCase )
__snake_case = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> np.ndarray:
__snake_case = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __UpperCAmelCase ( _UpperCAmelCase : int , _UpperCAmelCase : float ) -> np.ndarray:
# Creates a gaussian kernel of given dimension.
__snake_case = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase ):
__snake_case = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float , _UpperCAmelCase : float , _UpperCAmelCase : int , ) -> np.ndarray:
__snake_case = np.zeros(img.shape )
__snake_case = get_gauss_kernel(_UpperCAmelCase , _UpperCAmelCase )
__snake_case , __snake_case = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__snake_case = get_slice(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = img_s - img_s[kernel_size // 2, kernel_size // 2]
__snake_case = vec_gaussian(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.multiply(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = np.sum(_UpperCAmelCase ) / np.sum(_UpperCAmelCase )
__snake_case = val
return imga
def __UpperCAmelCase ( _UpperCAmelCase : list ) -> tuple:
__snake_case = args[1] if args[1:] else "../image_data/lena.jpg"
__snake_case = float(args[2] ) if args[2:] else 1.0
__snake_case = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__snake_case = int(args[4] )
__snake_case = kernel_size + abs(kernel_size % 2 - 1 )
else:
__snake_case = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
a , a , a , a : Tuple = parse_args(sys.argv)
a : Tuple = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : Dict = img / 255
a : str = out.astype('''float32''')
a : Union[str, Any] = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : Dict = out * 255
a : List[str] = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> str:
# Initialise PyTorch model
__snake_case = TaConfig.from_json_file(_UpperCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
__snake_case = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 712 |
'''simple docstring'''
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Dict , a_ : Union[str, Any] , a_ : Tuple ):
"""simple docstring"""
__snake_case = name
__snake_case = value
__snake_case = weight
def __repr__( self : Optional[int] ):
"""simple docstring"""
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def A ( self : Any ):
"""simple docstring"""
return self.value
def A ( self : str ):
"""simple docstring"""
return self.name
def A ( self : int ):
"""simple docstring"""
return self.weight
def A ( self : Tuple ):
"""simple docstring"""
return self.value / self.weight
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
__snake_case = []
for i in range(len(_UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> int:
__snake_case = sorted(_UpperCAmelCase , key=_UpperCAmelCase , reverse=_UpperCAmelCase )
__snake_case = []
__snake_case , __snake_case = 0.0, 0.0
for i in range(len(_UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 680 | 0 |
'''simple docstring'''
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> str:
return " ".join(
"".join(word[::-1] ) if len(_UpperCAmelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 713 |
'''simple docstring'''
import os
from math import logaa
def __UpperCAmelCase ( _UpperCAmelCase : str = "base_exp.txt" ) -> int:
__snake_case = 0
__snake_case = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) ):
__snake_case , __snake_case = list(map(_UpperCAmelCase , line.split("," ) ) )
if x * logaa(_UpperCAmelCase ) > largest:
__snake_case = x * logaa(_UpperCAmelCase )
__snake_case = i + 1
return result
if __name__ == "__main__":
print(solution())
| 680 | 0 |
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=10_24 ) -> Union[str, Any]:
__snake_case , __snake_case = [], []
__snake_case = list(zip(_UpperCAmelCase , _UpperCAmelCase ) )
__snake_case , __snake_case = sorted_examples[0]
def is_too_big(_UpperCAmelCase ):
return tok(_UpperCAmelCase , return_tensors="pt" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__snake_case = new_src + " " + src
__snake_case = new_tgt + " " + tgt
if is_too_big(_UpperCAmelCase ) or is_too_big(_UpperCAmelCase ): # cant fit, finalize example
finished_src.append(_UpperCAmelCase )
finished_tgt.append(_UpperCAmelCase )
__snake_case , __snake_case = src, tgt
else: # can fit, keep adding
__snake_case , __snake_case = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(_UpperCAmelCase )
finished_tgt.append(_UpperCAmelCase )
return finished_src, finished_tgt
def __UpperCAmelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> List[Any]:
__snake_case = Path(_UpperCAmelCase )
save_path.mkdir(exist_ok=_UpperCAmelCase )
for split in ["train"]:
__snake_case , __snake_case = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
__snake_case = [x.rstrip() for x in Path(_UpperCAmelCase ).open().readlines()]
__snake_case = [x.rstrip() for x in Path(_UpperCAmelCase ).open().readlines()]
__snake_case , __snake_case = pack_examples(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
print(F'''packed {split} split from {len(_UpperCAmelCase )} examples -> {len(_UpperCAmelCase )}.''' )
Path(save_path / F'''{split}.source''' ).open("w" ).write("\n".join(_UpperCAmelCase ) )
Path(save_path / F'''{split}.target''' ).open("w" ).write("\n".join(_UpperCAmelCase ) )
for split in ["val", "test"]:
__snake_case , __snake_case = data_dir / F'''{split}.source''', data_dir / F'''{split}.target'''
shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.source''' )
shutil.copyfile(_UpperCAmelCase , save_path / F'''{split}.target''' )
def __UpperCAmelCase ( ) -> Tuple:
__snake_case = argparse.ArgumentParser()
parser.add_argument("--tok_name" , type=_UpperCAmelCase , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("--max_seq_len" , type=_UpperCAmelCase , default=1_28 )
parser.add_argument("--data_dir" , type=_UpperCAmelCase )
parser.add_argument("--save_path" , type=_UpperCAmelCase )
__snake_case = parser.parse_args()
__snake_case = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(_UpperCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 714 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : Optional[int] = {
'''facebook/blenderbot_small-90M''': 512,
}
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = BlenderbotSmallTokenizer
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Dict=None , a_ : int="<|endoftext|>" , a_ : str="<|endoftext|>" , a_ : Any="<|endoftext|>" , a_ : Dict=False , a_ : Optional[Any]=True , **a_ : Dict , ):
"""simple docstring"""
super().__init__(
ByteLevelBPETokenizer(
vocab=a_ , merges=a_ , add_prefix_space=a_ , trim_offsets=a_ , ) , bos_token=a_ , eos_token=a_ , unk_token=a_ , **a_ , )
__snake_case = add_prefix_space
def A ( self : Dict , a_ : int , a_ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A ( self : str , a_ : List[int] , a_ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 680 | 0 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a : Tuple = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def A ( self : Union[str, Any] , a_ : List[str] , a_ : Optional[int] , a_ : List[str]=None , a_ : Any=None ):
"""simple docstring"""
__snake_case = self.layer[current_layer](a_ , a_ , head_mask[current_layer] )
__snake_case = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : int , a_ : int ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = BertEncoderWithPabee(a_ )
self.init_weights()
__snake_case = 0
__snake_case = 0
__snake_case = 0
__snake_case = 0
def A ( self : Optional[int] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = threshold
def A ( self : Optional[Any] , a_ : Union[str, Any] ):
"""simple docstring"""
__snake_case = patience
def A ( self : Any ):
"""simple docstring"""
__snake_case = 0
__snake_case = 0
def A ( self : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.inference_layers_num / self.inference_instances_num
__snake_case = (
f'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
f''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(a_ )
@add_start_docstrings_to_model_forward(a_ )
def A ( self : Dict , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Optional[int]=None , a_ : int=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=None , a_ : int=None , a_ : Any=None , a_ : Optional[Any]=None , a_ : Any=False , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
__snake_case = input_ids.size()
elif inputs_embeds is not None:
__snake_case = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
__snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
if token_type_ids is None:
__snake_case = torch.zeros(a_ , dtype=torch.long , device=a_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__snake_case = self.get_extended_attention_mask(a_ , a_ , a_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__snake_case , __snake_case , __snake_case = encoder_hidden_states.size()
__snake_case = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__snake_case = torch.ones(a_ , device=a_ )
__snake_case = self.invert_attention_mask(a_ )
else:
__snake_case = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__snake_case = self.get_head_mask(a_ , self.config.num_hidden_layers )
__snake_case = self.embeddings(
input_ids=a_ , position_ids=a_ , token_type_ids=a_ , inputs_embeds=a_ )
__snake_case = embedding_output
if self.training:
__snake_case = []
for i in range(self.config.num_hidden_layers ):
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](output_dropout(a_ ) )
res.append(a_ )
elif self.patience == 0: # Use all layers for inference
__snake_case = self.encoder(
a_ , attention_mask=a_ , head_mask=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
__snake_case = self.pooler(encoder_outputs[0] )
__snake_case = [output_layers[self.config.num_hidden_layers - 1](a_ )]
else:
__snake_case = 0
__snake_case = None
__snake_case = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__snake_case = self.encoder.adaptive_forward(
a_ , current_layer=a_ , attention_mask=a_ , head_mask=a_ )
__snake_case = self.pooler(a_ )
__snake_case = output_layers[i](a_ )
if regression:
__snake_case = logits.detach()
if patient_result is not None:
__snake_case = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__snake_case = 0
else:
__snake_case = logits.detach().argmax(dim=1 )
if patient_result is not None:
__snake_case = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(a_ ) ):
patient_counter += 1
else:
__snake_case = 0
__snake_case = logits
if patient_counter == self.patience:
break
__snake_case = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"""Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """ , _UpperCamelCase , )
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase ):
def __init__( self : List[str] , a_ : Tuple ):
"""simple docstring"""
super().__init__(a_ )
__snake_case = config.num_labels
__snake_case = BertModelWithPabee(a_ )
__snake_case = nn.Dropout(config.hidden_dropout_prob )
__snake_case = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(a_ )
def A ( self : int , a_ : str=None , a_ : Tuple=None , a_ : Union[str, Any]=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Union[str, Any]=None , a_ : Tuple=None , ):
"""simple docstring"""
__snake_case = self.bert(
input_ids=a_ , attention_mask=a_ , token_type_ids=a_ , position_ids=a_ , head_mask=a_ , inputs_embeds=a_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__snake_case = (logits[-1],)
if labels is not None:
__snake_case = None
__snake_case = 0
for ix, logits_item in enumerate(a_ ):
if self.num_labels == 1:
# We are doing regression
__snake_case = MSELoss()
__snake_case = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__snake_case = CrossEntropyLoss()
__snake_case = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__snake_case = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__snake_case = (total_loss / total_weights,) + outputs
return outputs
| 715 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 0 |
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a : int = 16
a : Tuple = 32
def A__ ( _UpperCAmelCase : Accelerator , _UpperCAmelCase : int = 16 , _UpperCAmelCase : str = "bert-base-cased" ) -> Any:
__snake_case = AutoTokenizer.from_pretrained(_UpperCAmelCase )
__snake_case = load_dataset("glue" , "mrpc" )
def tokenize_function(_UpperCAmelCase : int ):
# max_length=None => use the model max length (it's actually the default)
__snake_case = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__snake_case = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=_UpperCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__snake_case = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_UpperCAmelCase : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_UpperCAmelCase , padding="max_length" , max_length=1_28 , return_tensors="pt" )
return tokenizer.pad(_UpperCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
__snake_case = DataLoader(
tokenized_datasets["train"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
__snake_case = DataLoader(
tokenized_datasets["validation"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
def A__ ( _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> str:
model.eval()
__snake_case = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__snake_case = model(**_UpperCAmelCase )
__snake_case = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
__snake_case , __snake_case = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_UpperCAmelCase ) - 1:
__snake_case = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__snake_case = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
__snake_case = metric.compute()
return eval_metric["accuracy"]
def A__ ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> Union[str, Any]:
# Initialize accelerator
__snake_case = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__snake_case = config["lr"]
__snake_case = int(config["num_epochs"] )
__snake_case = int(config["seed"] )
__snake_case = int(config["batch_size"] )
__snake_case = args.model_name_or_path
set_seed(_UpperCAmelCase )
__snake_case , __snake_case = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__snake_case = AutoModelForSequenceClassification.from_pretrained(_UpperCAmelCase , return_dict=_UpperCAmelCase )
# Instantiate optimizer
__snake_case = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__snake_case = optimizer_cls(params=model.parameters() , lr=_UpperCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
__snake_case = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
__snake_case = 1
__snake_case = (len(_UpperCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__snake_case = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=0 , num_training_steps=_UpperCAmelCase , )
else:
__snake_case = DummyScheduler(_UpperCAmelCase , total_num_steps=_UpperCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
__snake_case = 0
# We also need to keep track of the stating epoch so files are named properly
__snake_case = 0
__snake_case = evaluate.load("glue" , "mrpc" )
__snake_case = num_epochs
if args.partial_train_epoch is not None:
__snake_case = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
__snake_case = args.resume_from_checkpoint.split("epoch_" )[1]
__snake_case = ""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
__snake_case = int(_UpperCAmelCase ) + 1
__snake_case = evaluation_loop(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
accelerator.print("resumed checkpoint performance:" , _UpperCAmelCase )
accelerator.print("resumed checkpoint's scheduler's lr:" , lr_scheduler.get_lr()[0] )
accelerator.print("resumed optimizers's lr:" , optimizer.param_groups[0]["lr"] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , "r" ) as f:
__snake_case = json.load(_UpperCAmelCase )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
__snake_case = {}
for epoch in range(_UpperCAmelCase , _UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
__snake_case = model(**_UpperCAmelCase )
__snake_case = outputs.loss
__snake_case = loss / gradient_accumulation_steps
accelerator.backward(_UpperCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
__snake_case = F'''epoch_{epoch}'''
__snake_case = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
__snake_case = evaluation_loop(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = accuracy
__snake_case = lr_scheduler.get_lr()[0]
__snake_case = optimizer.param_groups[0]["lr"]
__snake_case = epoch
__snake_case = overall_step
accelerator.print(F'''epoch {epoch}:''' , _UpperCAmelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , "w" ) as f:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def A__ ( ) -> Tuple:
__snake_case = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=_UpperCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_UpperCAmelCase , )
parser.add_argument(
"--output_dir" , type=_UpperCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--partial_train_epoch" , type=_UpperCAmelCase , default=_UpperCAmelCase , help="If passed, the training will stop after this number of epochs." , )
parser.add_argument(
"--num_epochs" , type=_UpperCAmelCase , default=2 , help="Number of train epochs." , )
__snake_case = parser.parse_args()
__snake_case = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 716 |
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
a : Optional[Any] = float('''nan''')
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , a_ : Optional[int] ):
"""simple docstring"""
__snake_case = sys.stdout
__snake_case = open(a_ , "a" )
def __getattr__( self : str , a_ : List[Any] ):
"""simple docstring"""
return getattr(self.stdout , a_ )
def A ( self : Union[str, Any] , a_ : List[Any] ):
"""simple docstring"""
self.stdout.write(a_ )
# strip tqdm codes
self.file.write(re.sub(r"^.*\r" , "" , a_ , 0 , re.M ) )
def __UpperCAmelCase ( _UpperCAmelCase : int=80 , _UpperCAmelCase : Any=False ) -> Optional[int]:
__snake_case = []
# deal with critical env vars
__snake_case = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
__snake_case = os.environ.get(_UpperCAmelCase , _UpperCAmelCase )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
__snake_case = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(_UpperCAmelCase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
__snake_case = []
__snake_case = ""
while len(_UpperCAmelCase ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_UpperCAmelCase )
__snake_case = ""
return "\\\n".join(_UpperCAmelCase )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Tuple:
# unwrap multi-line input
__snake_case = re.sub(R"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
__snake_case = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
__snake_case = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> str:
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 1_00 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
__snake_case = subprocess.run(_UpperCAmelCase , capture_output=_UpperCAmelCase , text=_UpperCAmelCase )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
__snake_case = variation.replace(" " , "-" )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(_UpperCAmelCase ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
__snake_case = json.load(_UpperCAmelCase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ) -> Dict:
__snake_case = []
__snake_case = []
__snake_case = F'''{id}: {variation:<{longest_variation_len}}'''
__snake_case = F'''{preamble}: '''
__snake_case = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_UpperCAmelCase ) , desc=_UpperCAmelCase , leave=_UpperCAmelCase ):
__snake_case = process_run_single(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__snake_case = single_run_metrics[target_metric_key]
if not math.isnan(_UpperCAmelCase ):
metrics.append(_UpperCAmelCase )
results.append(_UpperCAmelCase )
outcome += "✓"
else:
outcome += "✘"
__snake_case = F'''\33[2K\r{outcome}'''
if len(_UpperCAmelCase ) > 0:
__snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
__snake_case = round(mean_metrics[target_metric_key] , 2 )
__snake_case = F'''{outcome} {mean_target}'''
if len(_UpperCAmelCase ) > 1:
results_str += F''' {tuple(round(_UpperCAmelCase , 2 ) for x in results )}'''
print(_UpperCAmelCase )
__snake_case = variation
return mean_metrics
else:
print(_UpperCAmelCase )
return {variation_key: variation, target_metric_key: nan}
def __UpperCAmelCase ( ) -> Optional[int]:
__snake_case = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def __UpperCAmelCase ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ) -> List[Any]:
__snake_case = pd.DataFrame(_UpperCAmelCase )
__snake_case = "variation"
__snake_case = "diff_%"
__snake_case = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
__snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_UpperCAmelCase ):
# as a fallback, use the minimal value as the sentinel
__snake_case = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_UpperCAmelCase ):
__snake_case = df.apply(
lambda _UpperCAmelCase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
__snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys]
__snake_case = df.reindex(_UpperCAmelCase , axis="columns" ) # reorder cols
# capitalize
__snake_case = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "<br>" ) , axis="columns" )
__snake_case = df.rename(lambda _UpperCAmelCase : c.replace("_" , "\n" ) , axis="columns" )
__snake_case = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_UpperCAmelCase , floatfmt=".2f" )]
print("\n\n".join(_UpperCAmelCase ) )
def __UpperCAmelCase ( ) -> Dict:
__snake_case = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Base cmd" , )
parser.add_argument(
"--variations" , default=_UpperCAmelCase , type=_UpperCAmelCase , nargs="+" , required=_UpperCAmelCase , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=_UpperCAmelCase , type=_UpperCAmelCase , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=_UpperCAmelCase , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=_UpperCAmelCase , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=_UpperCAmelCase , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=_UpperCAmelCase , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
__snake_case = parser.parse_args()
__snake_case = args.output_dir
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
__snake_case = get_base_command(_UpperCAmelCase , _UpperCAmelCase )
# split each dimension into its --foo variations
__snake_case = [list(map(str.strip , re.split(R"\|" , _UpperCAmelCase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
__snake_case = list(map(str.strip , map(" ".join , itertools.product(*_UpperCAmelCase ) ) ) )
__snake_case = max(len(_UpperCAmelCase ) for x in variations )
# split wanted keys
__snake_case = args.report_metric_keys.split()
# capture prints into a log file for convenience
__snake_case = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
__snake_case = Tee(_UpperCAmelCase )
print(F'''\n*** Running {len(_UpperCAmelCase )} benchmarks:''' )
print(F'''Base command: {" ".join(_UpperCAmelCase )}''' )
__snake_case = "variation"
__snake_case = []
for id, variation in enumerate(tqdm(_UpperCAmelCase , desc="Total completion: " , leave=_UpperCAmelCase ) ):
__snake_case = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.repeat_times , _UpperCAmelCase , args.verbose , ) )
process_results(_UpperCAmelCase , args.target_metric_key , _UpperCAmelCase , args.base_variation , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 680 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.