deps = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
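
# Hedged usage sketch (not part of the original table): a pin table like `deps`
# is typically consumed by joining the version strings into installer input.
# The package selection below is illustrative, not from the source.
if __name__ == "__main__":
    selected = [deps[name] for name in ("numpy", "packaging", "tqdm")]
    print("pip install " + " ".join(f'"{spec}"' for spec in selected))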
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        # Pipeline tests use a larger vocabulary than the default test config.
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False

    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
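
# Usage note (not part of the original file): in a transformers dev checkout
# this suite is collected by pytest, and the @slow-decorated tests only run
# when the RUN_SLOW flag is set, e.g.:
#     RUN_SLOW=1 python -m pytest tests/models/mra/test_modeling_mra.py -v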
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


# Disable TF32 matmul so integration numerics are reproducible across GPUs.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
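
# Usage note (assumption, not from the source): @nightly / @require_torch_gpu
# tests like the ones above are skipped by default; diffusers gates them behind
# a CUDA device and an env flag, e.g.:
#     RUN_NIGHTLY=1 python -m pytest tests/pipelines/versatile_diffusion -k text_to_image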
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]


if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
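
# Hedged usage sketch (not in the original __init__): the lazy structure above
# defers importing framework-specific code until an attribute is first
# accessed, so importing the config does not pull in torch/tf/flax:
#
#     from transformers.models.encoder_decoder import EncoderDecoderConfig   # cheap
#     from transformers.models.encoder_decoder import EncoderDecoderModel    # torch imported here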
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to res
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input, then read it back in sorted order
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
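
# Expected output of the call above: [1, 2, 3, 9, 10, 13, 14]
# Note (added): because the BST is built without rebalancing, insertion is
# O(n) per element on already-sorted input, so tree_sort degrades to O(n^2)
# in the worst case.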
from string import ascii_lowercase, ascii_uppercase


def capitalize(sentence: str) -> str:
    """Capitalize the first character of a sentence, if it is a lowercase letter."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
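
# Example calls (illustrative, not from the source):
#     capitalize("hello world")  -> "Hello world"
#     capitalize("123 hello")    -> "123 hello"   (non-lowercase first chars pass through)
#     capitalize("")             -> ""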
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
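
# Example invocation (hypothetical file name and paths; the flags are the ones
# defined by the parser above):
#     python convert_speecht5_checkpoint.py --task t2s \
#         --checkpoint_path speecht5_tts.pt --vocab_path spm_char.model \
#         --pytorch_dump_folder_path ./speecht5_tts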
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
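
# Usage note (assumption, not from the source): this test downloads
# cnn_dailymail and two tiny BERT checkpoints, so it is gated behind @slow;
# enable it with RUN_SLOW=1 when invoking pytest on this file.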
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, computed recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input and return its binary form with a "0b"/"-0b" prefix."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
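
# Example calls (illustrative, not from the source):
#     main("0")   -> "0b0"
#     main("40")  -> "0b101000"
#     main("-40") -> "-0b101000"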
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
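
# Example invocation (hypothetical file name and checkpoint layout; the parent
# directory of the checkpoint names the dataset, matching the default path
# logic above):
#     python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus-aeslc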
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
    BatchSamplerShard,
    DataLoaderDispatcher,
    DataLoaderShard,
    IterableDatasetShard,
    SkipBatchSampler,
    SkipDataLoader,
    skip_first_batches,
)


class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset that stops at a random length.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
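    # To make the interleaving checked above concrete (an illustrative note,
    # not part of the original test): with 8 samples, num_processes=2 and
    # batch_size=2 (no split_batches), shard 0 yields [0, 1, 4, 5] and shard 1
    # yields [2, 3, 6, 7] -- each process consumes whole batches in turn.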
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
| 330 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
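# Behaviour sketch of the lazy pattern above (illustrative addition, assuming
# the usual transformers package layout): importing the module is cheap, and
# torch is only pulled in when a torch-backed symbol is first accessed.
#
#     import transformers.models.encodec as encodec  # no torch import yet
#     model_cls = encodec.EncodecModel               # resolved lazily here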
| 110 |
def perfect_cube(n: int) -> bool:
    val = n ** (1 / 3)
    return (val * val * val) == n
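

# The float check above can report wrong answers for large n, because
# n ** (1 / 3) is inexact. A small integer-only variant (our addition, not
# part of the original snippet; it assumes n >= 0):
def perfect_cube_exact(n: int) -> bool:
    """Return True if n == k**3 for some non-negative integer k."""
    k = round(n ** (1 / 3))  # float estimate of the cube root
    # verify the estimate and its neighbours exactly, in integer arithmetic
    return any((k + d) ** 3 == n for d in (-1, 0, 1))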
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 330 | 0 |
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals
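

# Quick round-trip check of the two helpers above (illustrative only, not part
# of the original Project Euler solution): "XIIII" is a redundant spelling of
# 14, which regenerates in minimal form as "XIV".
assert generate_roman_numerals(parse_roman_numerals("XIIII")) == "XIV"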
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : str = "/p089_roman.txt" ) -> Tuple:
__lowercase = 0
with open(os.path.dirname(_UpperCamelCase ) + roman_numerals_filename ) as filea:
__lowercase = filea.readlines()
for line in lines:
__lowercase = line.strip()
__lowercase = parse_roman_numerals(_UpperCamelCase )
__lowercase = generate_roman_numerals(_UpperCamelCase )
savings += len(_UpperCamelCase ) - len(_UpperCamelCase )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
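# A minimal sketch of the decorator this example is built around (the name is
# the real one from `accelerate.utils`; the toy `train` function below is
# hypothetical):
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # if this raises a CUDA out-of-memory error ...
#     train()  # ... the decorator halves batch_size (64, 32, ...) and retries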
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 330 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 198 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]
a_ = """3.0.12"""
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp
class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)

        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None
    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 330 | 0 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
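

# Sanity check on a made-up 4x4 grid (illustrative addition; the real puzzle
# input is the 20x20 grid in grid.txt): the best line of four here is the
# bottom row, 13 * 14 * 15 * 16 = 43680.
assert largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) == 43680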
if __name__ == "__main__":
print(solution())
| 30 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__( self, parent, batch_size=13, image_size=10, num_channels=3, patch_size=2, num_frames=2, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, num_labels=10, initializer_range=0.02, attention_type="divided_space_time", scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1], )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 330 | 0 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 1_00 * 2**20, 9_00 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)

    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size

    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False

    result = is_small_dataset(dataset_size)
    assert result == expected
| 191 |
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
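

# A few known automorphic numbers for intuition (illustrative addition):
# 5**2 = 25, 76**2 = 5776, 9376**2 = 87909376 -- each square ends in the
# number itself.
assert all(is_automorphic_number(n) for n in (0, 1, 5, 6, 25, 76, 9376))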
if __name__ == "__main__":
import doctest
doctest.testmod()
| 330 | 0 |
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes with BFS."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes with BFS."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
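

# Performance note (our addition, not in the original): `queue.pop(0)` is
# O(n) on a Python list. For larger graphs, a drop-in improvement is
# collections.deque:
#
#     from collections import deque
#     queue = deque([start])
#     node = queue.popleft()  # O(1) instead of O(n)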
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 230 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
def lowerCamelCase ( self , __UpperCAmelCase=False , __UpperCAmelCase="CompVis/stable-diffusion-v1-4" ):
'''simple docstring'''
__lowerCamelCase = jnp.bfloataa if fpaa else jnp.floataa
__lowerCamelCase = '''bf16''' if fpaa else None
__lowerCamelCase ,__lowerCamelCase = FlaxUNetaDConditionModel.from_pretrained(
__UpperCAmelCase , subfolder='''unet''' , dtype=__UpperCAmelCase , revision=__UpperCAmelCase )
return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 330 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)

        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, )
        vocoder = SpeechT5HifiGan(vocoder_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )

        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs, )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs
    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2
    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
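# A minimal usage sketch for the pipeline exercised by these tests (not part of the test
# suite). Assumptions: the public "cvssp/audioldm" checkpoint the slow tests load, and a
# prompt/step count chosen purely for illustration.
if __name__ == "__main__":
    import torch
    from diffusers import AudioLDMPipeline

    pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
    pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
    # `audios` is a (batch, samples) numpy array at pipe.vocoder.config.sampling_rate (16 kHz)
    waveform = pipe("A hammer hitting a wooden surface", num_inference_steps=25).audios[0]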
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
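# Sketch of how the lazy module above behaves at import time (illustrative only; the exact
# package path is an assumption about where this __init__.py lives, and only the
# _import_structure mapping is taken from the file itself):
#
#     from transformers.models.mmbt import MMBTConfig  # cheap: torch is not imported yet
#     from transformers.models.mmbt import MMBTModel   # first access triggers the real
#                                                      # import of modeling_mmbt
#
# _LazyModule replaces the package in sys.modules and resolves attributes on demand, so
# heavy optional dependencies are only paid for when a symbol is actually used.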
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
lowercase : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
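# Example invocation of the converter above. The script filename and checkpoint path are
# placeholders chosen for illustration, not values shipped with the script:
#
#     python convert_swiftformer_original_to_hf.py \
#         --swiftformer_name swiftformer_xs \
#         --pytorch_dump_folder_path ./converted_outputs/ \
#         --original_ckpt ./swiftformer_xs.pth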
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a low-rank adapter - used for testing purposes only."""

        def __init__(self, module, rank):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
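# Minimal sketch of the 4-bit loading path these tests exercise (assumes a CUDA GPU with
# `bitsandbytes` installed; the model id is the same bloom checkpoint the tests use, and the
# compute dtype is an illustrative choice):
if __name__ == "__main__":
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
    tok = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
    model = AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-1b7", quantization_config=quantization_config, device_map="auto"
    )
    out = model.generate(**tok("Hello my name is", return_tensors="pt").to(0), max_new_tokens=10)
    print(tok.decode(out[0], skip_special_tokens=True))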
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
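# Illustrative use of the exported classes, assuming this package is `datasets.download`
# (the cache directory is a placeholder):
#
#     from datasets.download import DownloadConfig, DownloadManager
#
#     dl_manager = DownloadManager(download_config=DownloadConfig(cache_dir="~/.cache/my_data"))
#     local_path = dl_manager.download("https://example.com/data.csv")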
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups, act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name, module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
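    # Worked example of the linear cross-fade implemented by blend_v/blend_h above: with
    # blend_extent = 4, overlap row y is a[..., -4 + y, :] * (1 - y/4) + b[..., y, :] * (y/4),
    # so `a` contributes weights (1.00, 0.75, 0.50, 0.25) while `b` ramps up through
    # (0.00, 0.25, 0.50, 0.75). This is what removes visible seams between neighbouring tiles.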
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
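# Minimal sketch of the tiling/slicing switches defined above (the checkpoint id is
# illustrative; any checkpoint loadable by this class works, and the latent size is an
# arbitrary example):
if __name__ == "__main__":
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
    vae.enable_tiling()   # decode very large latents tile by tile
    vae.enable_slicing()  # decode batched latents one sample at a time
    latents = torch.randn(1, 4, 64, 64)
    with torch.no_grad():
        image = vae.decode(latents).sample  # (1, 3, 512, 512) for an 8x downsampling VAE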
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
A =parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
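# Example invocation of the converter above (the paths are placeholders; the checkpoint
# directory must contain the fairseq dump with dicts and bpecodes, as the help text says):
#
#     python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#         --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
#         --pytorch_dump_folder_path ./fsmt-wmt19-ru-en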
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
END_COMMON = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
DECODER_PATTERNS = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
REMAINING_PATTERNS = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
KEYS_TO_IGNORE = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path) -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    config_update = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
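# Example invocation (the script filename and paths are placeholders; --tf_ckpt_path must be
# a TF checkpoint prefix readable by tf.train.list_variables):
#
#     python convert_bigbird_pegasus_tf_to_pytorch.py \
#         --tf_ckpt_path ./bigbird-pegasus-large-arxiv/model.ckpt-0 \
#         --save_dir ./bigbird-pegasus-large-arxiv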
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """First method: linear regression via the normal equation."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
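
# Quick sanity check for the fit above (toy numbers, worked through by hand:
# solving the normal equation gives beta = [2, 0, 1], so the returned value is
# 2 + 2 * 0 + 2 + 1 = 5.0):
#
#   >>> n = linear_regression_prediction([2, 3, 4, 5], [5, 3, 4, 6], [3, 1, 2, 4], [2], [2])
#   >>> abs(n - 5.0) < 1e-6
#   True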
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Second method: SARIMAX with match counts as an exogenous regressor."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Third method: support vector regression with an RBF kernel."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
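
# Worked example of the limit above: for train_user = [1, 2, 3, 4, 5],
# np.percentile gives q1 = 2.0 and q3 = 4.0, so iqr = 2.0 and the returned
# lower limit is 2.0 - 0.1 * 2.0 = 1.8.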
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: a forecast counts as safe when it does not exceed the
    actual value and lies within a 0.1 tolerance of it."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
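
# A minimal sketch of the vote (hypothetical forecasts): with an actual value
# of 0.5, the votes [0.48, 0.45, 0.2] yield safe = 2 (0.48 and 0.45 do not
# exceed the actual value and sit within the 0.1 tolerance) and not_safe = 1
# (0.2 misses the tolerance), so data_safety_checker([0.48, 0.45, 0.2], 0.5)
# returns True.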
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
    normalize_df = Normalizer().fit_transform(data_input_df.values)

    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input(self, text, overwrite=False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None
    def append_response(self, response):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input
    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
output += F"""{name} >> {text} \n"""
return output
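
    # Example of the transcript format produced above (hypothetical exchange):
    #   Conversation id: 7d7f9f6c-...
    #   user >> Hi there
    #   bot >> Hello! How can I help?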
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params
    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation
    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
return input_ids
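

# A minimal usage sketch, assuming the classes above are registered as the
# "conversational" task (the model name is illustrative):
#
#   from transformers import pipeline
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("What's the weather like today?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])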
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class _a :
def lowerCamelCase_ ( self: int , UpperCamelCase_: str , UpperCamelCase_: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def lowerCamelCase_ ( self: int ) -> Any:
"""simple docstring"""
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1])
        )
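
    # Worked example of the sequence-length formula above (hypothetical sizes):
    # with image_size = (224, 224) and patch_size = (16, 16),
    # num_patches = (224 // 16) * (224 // 16) = 196, so seq_len = 196 + 1 = 197
    # (the + 1 accounts for the prepended [CLS] token).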
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Optional[Any] ) -> Dict:
"""simple docstring"""
pt_model.to(__UpperCAmelCase )
pt_model.eval()
# prepare inputs
lowercase__ = inputs_dict
lowercase__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowercase__ = pt_model(**__UpperCAmelCase ).to_tuple()
lowercase__ = fx_model(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase )
lowercase__ = fx_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCAmelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel.from_pretrained(__UpperCAmelCase , from_flax=__UpperCAmelCase )
pt_model_loaded.to(__UpperCAmelCase )
pt_model_loaded.eval()
with torch.no_grad():
lowercase__ = pt_model_loaded(**__UpperCAmelCase ).to_tuple()
self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__UpperCAmelCase , pt_output_loaded.numpy() , 4E-2 )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel(__UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(__UpperCAmelCase )
lowercase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCAmelCase )
lowercase__ = fx_state
self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Tuple , UpperCamelCase_: List[Any] ) -> Any:
"""simple docstring"""
lowercase__ = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCAmelCase , __UpperCAmelCase )
lowercase__ = VisionTextDualEncoderModel(__UpperCAmelCase )
lowercase__ = FlaxVisionTextDualEncoderModel(__UpperCAmelCase )
lowercase__ = load_flax_weights_in_pytorch_model(__UpperCAmelCase , fx_model.params )
self.check_pt_flax_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCAmelCase )
def lowerCamelCase_ ( self: Union[str, Any] ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCAmelCase )
def lowerCamelCase_ ( self: Dict ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCAmelCase )
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCAmelCase )
@is_pt_flax_cross_test
def lowerCamelCase_ ( self: Any ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ = config_inputs_dict.pop('''vision_config''' )
lowercase__ = config_inputs_dict.pop('''text_config''' )
lowercase__ = config_inputs_dict
self.check_equivalence_pt_to_flax(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.check_equivalence_flax_to_pt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
@slow
def lowerCamelCase_ ( self: Optional[int] ) -> Any:
"""simple docstring"""
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class _a ( lowerCAmelCase__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class _a ( lowerCAmelCase__ , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class _a ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self: Tuple ) -> Optional[Any]:
"""simple docstring"""
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["""memory_attention""", """encoder_attn"""],
["""attention""", """attn"""],
["""/""", """."""],
[""".LayerNorm.gamma""", """_layer_norm.weight"""],
[""".LayerNorm.beta""", """_layer_norm.bias"""],
["""r.layer_""", """r.layers."""],
["""output_proj""", """out_proj"""],
["""ffn.dense_1.""", """fc2."""],
["""ffn.dense.""", """fc1."""],
["""ffn_layer_norm""", """final_layer_norm"""],
["""kernel""", """weight"""],
["""encoder_layer_norm.""", """encoder.layer_norm."""],
["""decoder_layer_norm.""", """decoder.layer_norm."""],
["""embeddings.weights""", """shared.weight"""],
]
def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
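

# Worked example of the substitutions above (hypothetical TF key):
#   "model/encoder/LayerNorm/gamma"
#   -> "model.encoder.LayerNorm.gamma"     ("/" -> ".")
#   -> "model.encoder_layer_norm.weight"   (".LayerNorm.gamma" -> "_layer_norm.weight")
#   -> "model.encoder.layer_norm.weight"   ("encoder_layer_norm." -> "encoder.layer_norm.")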
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attn_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0]
        output_from_past = model(next_tokens, attention_mask=attn_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
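

# A minimal sketch of how the helper above is used in the tester (shapes are
# illustrative): given a config and (batch, seq_len) integer tensors, it
# synthesises every attention/head mask the model call needs, e.g.
#
#   inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
#   outputs = TFBlenderbotSmallModel(config)(**inputs_dict)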
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"
    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
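

# Worked example of the key mapping above (hypothetical fairseq key):
#   name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
#   matches MAPPING["self_attn.k_proj"] = "encoder.layers.*.attention.k_proj";
#   the "*" is replaced by the layer index parsed from the name ("3"), and the
#   value is written to "unispeech_sat.encoder.layers.3.attention.k_proj"
#   with weight_type "weight".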
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
a_ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
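

# Worked example of the escape-time loop above: for the point (0, 0) the orbit
# never exceeds the threshold, the loop runs to completion and the function
# returns (max_step - 1) / (max_step - 1) = 1.0; points far outside the set
# escape within the first few steps and return values close to 0.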
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
a_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}
    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
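

# A minimal usage sketch, assuming this class is registered as the
# "depth-estimation" task (the model name is illustrative):
#
#   from transformers import pipeline
#
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("path/to/image.jpg")
#   result["depth"]            # PIL image of the rescaled depth map
#   result["predicted_depth"]  # raw torch tensor of predicted depths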
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)

BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class lowercase__( lowerCAmelCase__ ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ) -> Any:
super().setUp()
        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
def _lowercase ( self : int ) -> List[str]:
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
def _lowercase ( self : Tuple ) -> Optional[Any]:
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def _lowercase ( self : Union[str, Any] ) -> Tuple:
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
def _lowercase ( self : List[str] ) -> Dict:
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
            if policy == "NO_WRAP":
                self.assertIsNone(fsdp_plugin.auto_wrap_policy)
            else:
                self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
def _lowercase ( self : Dict ) -> List[str]:
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)
def _lowercase ( self : Optional[Any] ) -> Any:
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
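
    # Sketch of the environment-driven configuration exercised above (values
    # are illustrative): with ACCELERATE_USE_FSDP=true and
    # FSDP_SHARDING_STRATEGY=1 in the mocked environment,
    # FullyShardedDataParallelPlugin() resolves to ShardingStrategy.FULL_SHARD
    # without any explicit constructor arguments.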
@require_fsdp
@require_multi_gpu
@slow
class lowercase__( lowerCAmelCase__ ):
"""simple docstring"""
def _lowercase ( self : Tuple ) -> Optional[int]:
super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
def _lowercase ( self : str ) -> Optional[int]:
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
for config in self.performance_configs:
            cmd_config = cmd.copy()
for i, strategy in enumerate(__UpperCAmelCase ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append('''--mixed_precision=no''' )
else:
cmd_config.append('''--mixed_precision=fp16''' )
if "cpu_offload" in config:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
'''--use_fsdp''',
'''--mixed_precision=fp16''',
'''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
]
        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
            state_dict_config_index = len(cmd_config)
for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
'''--partial_train_epoch=1''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
'''accelerate''',
'''launch''',
'''--num_processes=2''',
'''--num_machines=1''',
'''--machine_rank=0''',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['''--mixed_precision=fp16'''] )
else:
cmd_config.extend(['''--mixed_precision=no'''] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['''--use_fsdp'''] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append('''--fsdp_offload_params=True''' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('''--fsdp_min_num_params=2000''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        # resize the shorter edge of the image to size["shortest_edge"], keeping the aspect ratio
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        # crop the center of the image to (size["height"], size["width"])
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        # rescale pixel values by `scale` (typically 1/255)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        # normalize with per-channel mean and standard deviation
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        do_convert_rgb=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
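

# Illustrative usage sketch (editorial addition, not part of the original module):
# with the defaults above, preprocessing resizes the shortest edge to 224,
# center-crops to 224x224, rescales by 1/255 and normalizes with CLIP mean/std.
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#     batch["pixel_values"].shape  # -> (1, 3, 224, 224)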
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
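

# Editorial note: `all_results.json` is the metrics file that the example
# training scripts write at the end of a run; `get_results` simply loads it
# so the tests below can assert on the reported accuracy and wall-clock time.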
lowerCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _SCREAMING_SNAKE_CASE(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
        self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = '''
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
'''.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node, storing its data, parent reference and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation: attach the lower-rank tree to the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # getting the edges in ascending order of weights
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: take the lightest edges that connect two different components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
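

if __name__ == "__main__":
    # Small illustrative run (editorial addition): in the a-b-c triangle below,
    # the heaviest edge is redundant, so Kruskal's algorithm drops it from the MST.
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 1)
    g.add_edge("b", "c", 2)
    g.add_edge("a", "c", 3)
    mst = g.kruskal()
    assert mst.connections["a"] == {"b": 1}
    assert mst.connections["b"] == {"a": 1, "c": 2}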
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
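
# Editorial note: after the deprecation call above, this module simply re-exports
# `FlaxStableDiffusionControlNetPipeline` from `diffusers.pipelines.controlnet`,
# so old import paths keep working while emitting the warning.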
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # every input is tiled to (batch, num_choices, seq_len) so each candidate answer is scored
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
def least_divisible_repunit(divisor: int) -> int:
    """Return the length of the smallest repunit divisible by `divisor` (0 if none exists)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least divisor whose smallest divisible repunit is longer than `limit`."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
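

# Quick sanity check (editorial addition): 111111 = 7 * 15873 is the shortest
# repunit divisible by 7, while R(1)..R(5) are not, so the index must be 6.
assert least_divisible_repunit(7) == 6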
if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
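    # Editorial note: the `_import_structure` mapping above lets `_LazyModule`
    # defer the torch/TF/flax imports until a symbol such as `EncoderDecoderModel`
    # is first accessed, keeping the initial package import cheap.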
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
lowercase : List[str] = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    # map a requested pixel size to the MoVQ latent grid, rounding partial blocks up
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
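

# For example (illustrative): with the default scale_factor of 8, a 512x512
# pixel request maps to a 64x64 latent grid, and a 500x500 request rounds up
# to the same 64x64 grid.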
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCAmelCase )
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
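

# Editorial note: the long example docstring defined near the top of this module
# shows the intended end-to-end usage of this pipeline (a prior pipeline producing
# image embeddings, a depth "hint", then this controlnet pipeline).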
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    # the name `capitalize` is an editorial choice (the original was mangled);
    # only a leading lowercase ASCII letter is upper-cased
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
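

# Illustrative checks (editorial addition): only a leading lowercase ASCII
# letter is changed; anything else is passed through unchanged.
assert capitalize("hello world") == "Hello world"
assert capitalize("123 hello") == "123 hello"
assert capitalize("") == ""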
if __name__ == "__main__":
    from doctest import testmod

    testmod()
import re
def dna_complement(dna: str) -> str:
    # the function name is an editorial choice (the original was mangled);
    # returns the complementary strand, e.g. "ATCG" -> "TAGC"
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(str.maketrans("ATCG", "TAGC"))
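

# Illustrative checks (editorial addition): each base maps to its Watson-Crick
# complement (A<->T, C<->G).
assert dna_complement("ATCG") == "TAGC"
assert dna_complement("GTAT") == "CATA"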
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __lowerCAmelCase ( TestCasePlus ):
@slow
@require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
# Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            # positions set to -100 are ignored by the cross-entropy loss
            # (PyTorch's ignore_index), so pad tokens do not contribute to it
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
return {"accuracy": accuracy}
# map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
train_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
# same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
val_dataset.set_format(
type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )
        # start training
        trainer.train()
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class _a ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)
                self.assertNotEqual(vocab_size, 0)
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)
                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))
                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)
                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )
                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"])
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."],
        )
    @slow
    def test_tokenizer_integration(self):
        sequences = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
        expected_encoding = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__UpperCAmelCase , )
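For reference, a minimal sketch of how an expected encoding like the one above can be regenerated (assuming the standard `transformers` tokenizer API; the checkpoint and revision are the ones passed to `tokenizer_integration_test_util`, while the input sentences here are placeholders):

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained(
    "microsoft/speecht5_asr", revision="c5ef64c71905caeccde0e4462ef3f9077224c524"
)
# Padding to the longest sequence in the batch produces the trailing pad ids in
# `input_ids` and the matching 0s in `attention_mask`.
encoding = tokenizer(["first input sentence", "a second, longer input sentence"], padding=True)
print(encoding["input_ids"])
print(encoding["attention_mask"])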
| 34 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""TimmBackbone"""]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
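To illustrate what the lazy-module pattern buys: none of the submodules above are imported at package-import time; the first attribute access triggers the real import. A minimal sketch, assuming the standard `transformers` package layout:

# Resolving the attribute below goes through `_LazyModule`; only at this point is
# `configuration_timm_backbone` actually imported.
from transformers.models.timm_backbone import TimmBackboneConfig

config = TimmBackboneConfig()  # default constructor arguments assumed
print(type(config).__name__)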
| 330 | 0 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
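To make the allow-list semantics concrete: a value of `True` exempts every attribute of that configuration class from the check, while a list exempts only the named attributes. Note that the `update(...)` call overrides earlier entries, e.g. `FSMTConfig` starts as `["langs"]` but ends up as `True`:

assert SPECIAL_CASES_TO_ALLOW["FSMTConfig"] is True  # overridden by the update above
assert SPECIAL_CASES_TO_ALLOW["EncodecConfig"] == ["overlap"]  # only `overlap` is exempted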
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any name in `attributes` appears in one of the modeling file contents in `source_strings`,
    or is otherwise explicitly allowed for `config_class`.
    """
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of `__init__` arguments of `config_class` that are not used in its modeling files."""
    # Get the parameters in `__init__` of the configuration class, and their default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
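As a quick spot check, the helper can be exercised on a single configuration class; a minimal sketch, assuming the script is run from the repository root as described at the top of this file:

# Hypothetical spot check: list the __init__ arguments of BertConfig that no
# modeling_*.py file in its model folder reads (expected to be empty).
print(check_config_attributes_being_used(transformers.BertConfig))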
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 305 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dataset of random length for testing; after each yielded item, iteration stops
    # with probability `p_stop`, or after `max_length` items at the latest.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard should report the same number of batches as the expected shard.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batches.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
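Outside of the test harness the sharding behaviour is easy to see directly; a small sketch (positional arguments as in the tests above: batch sampler, number of processes, process index):

from torch.utils.data import BatchSampler
from accelerate.data_loader import BatchSamplerShard

sampler = BatchSampler(range(10), batch_size=2, drop_last=False)
shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
# With even_batches=True (the default), the short shard wraps around to the start:
print([list(shard) for shard in shards])
# -> [[[0, 1], [4, 5], [8, 9]], [[2, 3], [6, 7], [0, 1]]]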
| 330 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class _a(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize the shorter edge of the image to `size["shortest_edge"]`, keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Center-crop the image to `(size["height"], size["width"])`.
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Rescale pixel values by `scale`, typically 1/255 to map [0, 255] to [0, 1].
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Normalize with the given per-channel mean and standard deviation.
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
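A hedged usage sketch for the processor above (the model-specific class name was elided in this source, so the obfuscated name `_a` is used as-is; `BaseImageProcessor.__call__` forwards to `preprocess`):

import numpy as np
from PIL import Image

processor = _a()  # defaults: shortest edge -> 256, center crop -> 224x224, rescale + normalize
image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)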
| 110 |
def perfect_cube(n: int) -> bool:
    # Comparing a float cube root cubed against `n` fails due to rounding
    # (e.g. 27 ** (1 / 3) == 3.0000000000000004), so round the root to the
    # nearest integer and verify exactly. abs() also handles negative cubes.
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)
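For integers large enough that a float cube root loses precision, a more robust sketch probes the rounded root's neighbours (for astronomically large `n` the float power itself overflows, and an integer Newton iteration would be needed instead):

def perfect_cube_exact(n: int) -> bool:
    n = abs(n)
    root = round(n ** (1 / 3))
    # round() can be off by one near the limits of float precision.
    return any(c**3 == n for c in (root - 1, root, root + 1))

print(perfect_cube_exact(10**15))  # True: (10**5) ** 3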
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 330 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
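For context, these builder tests back the user-facing conversion path; a hedged sketch, assuming a `datasets` version that ships `Dataset.from_spark` (which wraps the `Spark` builder exercised above):

import pyspark
from datasets import Dataset

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(100)

ds = Dataset.from_spark(df)  # materializes the DataFrame into an Arrow-backed Dataset
print(len(ds))  # 100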
| 325 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
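The decorator's retry semantics can also be seen in isolation; a minimal hedged sketch (on a CUDA out-of-memory error, `find_executable_batch_size` halves the batch size and re-invokes the wrapped function):

from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def run(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build the model/dataloaders here; a CUDA OOM triggers a retry at batch_size // 2

run()  # called with no arguments; the decorator injects `batch_size`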
| 330 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _TRAINERSPEC._serialized_start = 45
    _TRAINERSPEC._serialized_end = 1581
    _TRAINERSPEC_MODELTYPE._serialized_start = 1517
    _TRAINERSPEC_MODELTYPE._serialized_end = 1570
    _NORMALIZERSPEC._serialized_start = 1584
    _NORMALIZERSPEC._serialized_end = 1793
    _SELFTESTDATA._serialized_start = 1795
    _SELFTESTDATA._serialized_end = 1916
    _SELFTESTDATA_SAMPLE._serialized_start = 1864
    _SELFTESTDATA_SAMPLE._serialized_end = 1905
    _MODELPROTO._serialized_start = 1919
    _MODELPROTO._serialized_end = 2429
    _MODELPROTO_SENTENCEPIECE._serialized_start = 2208
    _MODELPROTO_SENTENCEPIECE._serialized_end = 2418
    _MODELPROTO_SENTENCEPIECE_TYPE._serialized_start = 2323
    _MODELPROTO_SENTENCEPIECE_TYPE._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
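A hedged sketch of using the generated module to inspect a trained SentencePiece model (assuming this file lives at `transformers/utils/sentencepiece_model_pb2.py`; the model path is illustrative):

from transformers.utils import sentencepiece_model_pb2 as model_pb2

m = model_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # illustrative path
    m.ParseFromString(f.read())
print(m.trainer_spec.model_type, len(m.pieces))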
| 198 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
a_ = OSError
# Data
# ------------------------------------------------
a_ = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
a_ = """3.0.12"""
a_ = None
def a__ ( ):
global _logger
__lowerCamelCase = _logger or logging.getLogger(__name__ )
return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        # The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        return f"The file lock '{self.lock_file}' could not be acquired."
class _Acquire_ReturnProxy:
    """Helper returned by acquire() so it can be used as a context manager."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """Uses the msvcrt.locking() function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the fcntl.flock() function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
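# Minimal usage sketch for the classes above ("app.lock" is an illustrative
# path); FileLock resolves to the best backend available on the platform:
#
#     lock = FileLock("app.lock", timeout=5)
#     try:
#         with lock:   # acquire() on __enter__, release() on __exit__
#             ...      # critical section: one process at a time
#     except Timeout:
#         print("another process holds the lock")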
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
1_0: 'a',
1_1: 'b',
1_2: 'c',
1_3: 'd',
1_4: 'e',
1_5: 'f',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert an integral base-10 value to its hexadecimal representation."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
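# A few sanity checks for the converter above (digits worked out by
# repeated divmod by 16).
assert decimal_to_hexadecimal(5) == "0x5"
assert decimal_to_hexadecimal(15) == "0xf"
assert decimal_to_hexadecimal(-256) == "-0x100"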
if __name__ == "__main__":
import doctest
doctest.testmod()
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=10 , __UpperCAmelCase=3 , __UpperCAmelCase=2 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase="divided_space_time" , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = num_channels
__lowerCamelCase = patch_size
__lowerCamelCase = num_frames
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = attention_type
__lowerCamelCase = initializer_range
__lowerCamelCase = scope
__lowerCamelCase = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__lowerCamelCase = (image_size // patch_size) ** 2
__lowerCamelCase = (num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__lowerCamelCase = self.num_labels
return config
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TimesformerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = TimesformerForVideoClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase )
# verify the logits shape
__lowerCamelCase = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCAmelCase__ = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TimesformerModelTester(self )
__lowerCamelCase = ConfigTester(
self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
__lowerCamelCase = copy.deepcopy(__UpperCAmelCase )
if return_labels:
if model_class in get_values(__UpperCAmelCase ):
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase )
return inputs_dict
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(__UpperCAmelCase )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = TimesformerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
if not self.has_attentions:
pass
else:
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = True
for model_class in self.all_model_classes:
__lowerCamelCase = self.model_tester.seq_length
__lowerCamelCase = self.model_tester.num_frames
__lowerCamelCase = True
__lowerCamelCase = False
__lowerCamelCase = True
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase = True
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__lowerCamelCase = len(__UpperCAmelCase )
# Check attention is always last and order is fine
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(__UpperCAmelCase ) )
__lowerCamelCase = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
__lowerCamelCase = outputs.hidden_states
__lowerCamelCase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
__lowerCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowerCamelCase ,__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCamelCase ( self ):
'''simple docstring'''
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
__UpperCAmelCase )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_video()
__lowerCamelCase = image_processor(video[:8] , return_tensors='''pt''' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase = model(**__UpperCAmelCase )
# verify the logits
__lowerCamelCase = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4 ) )
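# Standalone inference sketch for the checkpoint exercised above; `video` is
# assumed to be a list of 8 HxWx3 uint8 frames (e.g. from prepare_video()):
#
#     processor = VideoMAEImageProcessor.from_pretrained("facebook/timesformer-base-finetuned-k400")
#     model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
#     inputs = processor(video, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[int(logits.argmax(-1))])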
"""simple docstring"""
import os
from collections.abc import Iterator
def __lowerCamelCase ( a_ : str = "." ) -> List[str]:
for dir_path, dir_names, filenames in os.walk(_UpperCamelCase ):
__SCREAMING_SNAKE_CASE :Union[str, Any] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCamelCase , _UpperCamelCase ).lstrip('''./''' )
def __lowerCamelCase ( a_ : Tuple ) -> Dict:
return f'''{i * " "}*''' if i else "\n##"
def __lowerCamelCase ( a_ : str , a_ : str ) -> List[str]:
__SCREAMING_SNAKE_CASE :List[str] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCamelCase ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(_UpperCamelCase )} {new_part.replace("_" , " " ).title()}''' )
return new_path
def __lowerCamelCase ( a_ : str = "." ) -> List[Any]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = ''''''
for filepath in sorted(good_file_paths(_UpperCamelCase ) ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :str = os.path.split(_UpperCamelCase )
if filepath != old_path:
__SCREAMING_SNAKE_CASE :List[Any] = print_path(_UpperCamelCase , _UpperCamelCase )
__SCREAMING_SNAKE_CASE :str = (filepath.count(os.sep ) + 1) if filepath else 0
__SCREAMING_SNAKE_CASE :str = f'''{filepath}/{filename}'''.replace(''' ''' , '''%20''' )
__SCREAMING_SNAKE_CASE :Optional[int] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f'''{md_prefix(_UpperCamelCase )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(".") | 191 |
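# Illustrative output for a repo containing maths/fibonacci.py and
# maths/prime_check.py (file names hypothetical):
#
#     ## Maths
#       * [Fibonacci](maths/fibonacci.py)
#       * [Prime Check](maths/prime_check.py)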
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of ``number`` ends in ``number`` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
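# Illustrative checks: an automorphic number's square ends in the number itself.
assert is_automorphic_number(5)       # 5 * 5 = 25
assert is_automorphic_number(76)      # 76 * 76 = 5776
assert not is_automorphic_number(7)   # 7 * 7 = 49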
if __name__ == "__main__":
import doctest
doctest.testmod()
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fpaa=False):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image
    def get_unet_model(self, fpaa=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        revision = "bf16" if fpaa else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params
    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fpaa=False):
        dtype = jnp.bfloat16 if fpaa else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fpaa=True)
        latents = self.get_latents(seed, fpaa=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fpaa=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fpaa=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fpaa=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fpaa=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
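# Standalone sketch of the bf16 forward pass the tests above verify (shapes
# follow the SD-v1 fixtures; treat this as illustrative, not a test):
#
#     model, params = FlaxUNet2DConditionModel.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", subfolder="unet", revision="bf16", dtype=jnp.bfloat16
#     )
#     latents = jnp.zeros((4, 4, 64, 64), dtype=jnp.bfloat16)
#     context = jnp.zeros((4, 77, 768), dtype=jnp.bfloat16)
#     sample = model.apply(
#         {"params": params}, latents, jnp.array(4, dtype=jnp.int32), encoder_hidden_states=context
#     ).sample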
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """Utility class for storing learned text embeddings for classifier-free sampling."""
@register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation using VQ Diffusion."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
def __init__( self : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=__UpperCAmelCase , transformer=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , scheduler=__UpperCAmelCase , learned_classifier_free_sampling_embeddings=__UpperCAmelCase , )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ) -> int:
"""simple docstring"""
_UpperCAmelCase : int = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else 1
# get prompt text embeddings
_UpperCAmelCase : List[str] = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
_UpperCAmelCase : Union[str, Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCAmelCase : Optional[int] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_UpperCAmelCase : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
_UpperCAmelCase : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_UpperCAmelCase : Dict = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate text embeddings for each generation per prompt
_UpperCAmelCase : Union[str, Any] = prompt_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_UpperCAmelCase : List[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
_UpperCAmelCase : Optional[Any] = negative_prompt_embeds.unsqueeze(0 ).repeat(__UpperCAmelCase , 1 , 1 )
else:
_UpperCAmelCase : Tuple = [""] * batch_size
_UpperCAmelCase : List[str] = text_input_ids.shape[-1]
_UpperCAmelCase : str = self.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" , )
_UpperCAmelCase : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_UpperCAmelCase : Optional[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase : Union[str, Any] = negative_prompt_embeds.shape[1]
_UpperCAmelCase : Dict = negative_prompt_embeds.repeat(1 , __UpperCAmelCase , 1 )
_UpperCAmelCase : int = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_UpperCAmelCase : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : Optional[Any] = 5.0 , lowerCAmelCase__ : List[str] = 1.0 , lowerCAmelCase__ : List[str] = 1 , lowerCAmelCase__ : List[Any] = None , lowerCAmelCase__ : int = None , lowerCAmelCase__ : Any = "pil" , lowerCAmelCase__ : Union[str, Any] = True , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : List[Any] = 1 , ) -> Optional[Any]:
"""simple docstring"""
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_UpperCAmelCase : List[str] = 1
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
_UpperCAmelCase : str = len(__UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}""" )
_UpperCAmelCase : Optional[int] = batch_size * num_images_per_prompt
_UpperCAmelCase : List[str] = guidance_scale > 1.0
_UpperCAmelCase : Tuple = self._encode_prompt(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__UpperCAmelCase )}.""" )
# get the initial completely masked latents unless the user supplied it
_UpperCAmelCase : str = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_UpperCAmelCase : Dict = self.transformer.num_vector_embeds - 1
_UpperCAmelCase : int = torch.full(__UpperCAmelCase , __UpperCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
_UpperCAmelCase : Optional[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCAmelCase , device=self.device )
_UpperCAmelCase : Optional[Any] = self.scheduler.timesteps.to(self.device )
_UpperCAmelCase : str = latents
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
_UpperCAmelCase : List[str] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_UpperCAmelCase : str = self.transformer(__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , timestep=__UpperCAmelCase ).sample
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase : List[str] = model_output.chunk(2 )
_UpperCAmelCase : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__UpperCAmelCase , dim=1 , keepdim=__UpperCAmelCase )
_UpperCAmelCase : Tuple = self.truncate(__UpperCAmelCase , __UpperCAmelCase )
# remove `log(0)`'s (`-inf`s)
_UpperCAmelCase : int = model_output.clamp(-7_0 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase : Tuple = self.scheduler.step(__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = self.vqvae.config.vq_embed_dim
_UpperCAmelCase : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_UpperCAmelCase : Dict = self.vqvae.quantize.get_codebook_entry(__UpperCAmelCase , shape=__UpperCAmelCase )
_UpperCAmelCase : List[str] = self.vqvae.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase ).sample
_UpperCAmelCase : str = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : Union[str, Any] = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = torch.sort(__UpperCAmelCase , 1 , descending=__UpperCAmelCase )
_UpperCAmelCase : Optional[int] = torch.exp(__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_UpperCAmelCase : str = torch.full_like(keep_mask[:, 0:1, :] , __UpperCAmelCase )
_UpperCAmelCase : Dict = torch.cat((all_true, keep_mask) , dim=1 )
_UpperCAmelCase : List[Any] = keep_mask[:, :-1, :]
_UpperCAmelCase : str = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
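# Minimal usage sketch for the pipeline above ("microsoft/vq-diffusion-ithq"
# is the commonly published checkpoint; treat the name as illustrative here):
#
#     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#     image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]
#     image.save("teddy.png")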
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
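# The _LazyModule indirection above keeps importing the package cheap: the
# torch-backed symbols are only imported on first attribute access, e.g.
# (import path illustrative, MMBT has since moved under `deprecated`):
#
#     from transformers.models.mmbt import MMBTConfig  # resolved lazily here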
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = module
__lowerCamelCase = nn.Sequential(
nn.Linear(module.in_features , __UpperCAmelCase , bias=__UpperCAmelCase ) , nn.Linear(__UpperCAmelCase , module.out_features , bias=__UpperCAmelCase ) , )
__lowerCamelCase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=__UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def lowerCamelCase ( self , __UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.module(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase ) + self.adapter(__UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ = """bigscience/bloom-1b7"""
# Constant values
lowerCAmelCase__ = 2.1_09_65_95_52_69_25_74
lowerCAmelCase__ = """Hello my name is"""
lowerCAmelCase__ = set()
EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" )
EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" )
EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" )
lowerCAmelCase__ = 1_0
def lowerCamelCase ( self ):
'''simple docstring'''
# Models and tokenizer
__lowerCamelCase = AutoTokenizer.from_pretrained(self.model_name )
class Bnb4BitTest(Base4bitTest):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# Models and tokenizer
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def lowerCamelCase ( self ):
'''simple docstring'''
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_abit.config
self.assertTrue(hasattr(__UpperCAmelCase , '''quantization_config''' ) )
__lowerCamelCase = config.to_dict()
__lowerCamelCase = config.to_diff_dict()
__lowerCamelCase = config.to_json_string()
def lowerCamelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
__lowerCamelCase = self.model_fpaa.get_memory_footprint()
__lowerCamelCase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowerCamelCase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def lowerCamelCase ( self ):
'''simple docstring'''
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(__UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BitsAndBytesConfig()
__lowerCamelCase = True
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = BitsAndBytesConfig()
with self.assertRaises(__UpperCAmelCase ):
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def lowerCamelCase ( self ):
'''simple docstring'''
with self.assertRaises(__UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(__UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' )
__lowerCamelCase = self.model_fpaa.to(torch.floataa )
__lowerCamelCase = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.half()
# Check this does not throw an error
__lowerCamelCase = self.model_fpaa.float()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
def lowerCamelCase ( cls ):
'''simple docstring'''
__lowerCamelCase = '''t5-small'''
__lowerCamelCase = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
__lowerCamelCase = AutoTokenizer.from_pretrained(cls.model_name )
__lowerCamelCase = '''Translate in German: Hello, my dog is cute'''
def lowerCamelCase ( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
from transformers import TaForConditionalGeneration
__lowerCamelCase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowerCamelCase = None
# test with `t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
__lowerCamelCase = modules
def lowerCamelCase ( self ):
'''simple docstring'''
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
# test with `flan-t5-small`
__lowerCamelCase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
__lowerCamelCase = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
__lowerCamelCase = model.generate(**__UpperCAmelCase )
class Classes4BitModelTest(Base4bitTest):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
# model_name
__lowerCamelCase = '''bigscience/bloom-560m'''
__lowerCamelCase = '''t5-small'''
# Different types of model
__lowerCamelCase = AutoModel.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Sequence classification model
__lowerCamelCase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# CausalLM model
__lowerCamelCase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
# Seq2seq model
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=__UpperCAmelCase , device_map='''auto''' )
def lowerCamelCase ( self ):
'''simple docstring'''
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowerCamelCase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
def lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name , load_in_4bit=True , device_map='''balanced''' )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text , return_tensors='''pt''' )
        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=__UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
        self.model_name = '''facebook/opt-350m'''
super().setUp()
def lowerCamelCase ( self ):
'''simple docstring'''
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_4bit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32 )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
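# A minimal LoRALayer sketch for context (an assumption -- the helper the test
# above relies on is defined elsewhere in the file): it wraps a frozen base
# projection and adds a trainable low-rank residual, which is what the gradient
# assertions above probe via `module.adapter[1]`.
import torch.nn as nn

class LoRALayer(nn.Module):
    def __init__(self, module, rank):
        super().__init__()
        self.module = module  # the original (frozen) linear projection
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        # small init for the down-projection, zeros for the up-projection so the
        # adapter starts out contributing nothing to the forward pass
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, x, *args, **kwargs):
        return self.module(x, *args, **kwargs) + self.adapter(x)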
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = """gpt2-xl"""
lowerCAmelCase__ = 3.31_91_85_48_54_15_21_87
| 330 | 0 |
from __future__ import annotations
def UpperCamelCase ( nums ):
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
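    # Worked example (illustration): for nums = [3, 2, 7, 10] the pair
    # (max_including, max_excluding) evolves (3, 0) -> (2, 3) -> (10, 3) -> (13, 10),
    # so the function returns max(13, 10) = 13, the best non-adjacent sum.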
| 342 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = 42
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
lowerCAmelCase__ = True
@register_to_config
    def __init__( self , in_channels = 3 , out_channels = 3 , down_block_types = ("DownEncoderBlock2D",) , up_block_types = ("UpDecoderBlock2D",) , block_out_channels = (64,) , layers_per_block = 1 , act_fn = "silu" , latent_channels = 4 , norm_num_groups = 32 , sample_size = 32 , scaling_factor = 0.18_215 , ):
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
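        # Note (illustration): for a Stable-Diffusion-style config with
        # sample_size=512 and four block_out_channels entries, the encoder
        # downsamples by 2**3 = 8, so tile_latent_min_size = 512 // 8 = 64:
        # 512-px sample tiles correspond to 64-px latent tiles in the tiled paths.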
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
'''simple docstring'''
if isinstance(__UpperCAmelCase , (Encoder, Decoder) ):
__lowerCamelCase = value
def lowerCamelCase ( self , __UpperCAmelCase = True ):
'''simple docstring'''
__lowerCamelCase = use_tiling
def lowerCamelCase ( self ):
'''simple docstring'''
self.enable_tiling(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = True
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = {}
def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
__lowerCamelCase = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return processors
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = len(self.attn_processors.keys() )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
raise ValueError(
F"""A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the"""
F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if hasattr(__UpperCAmelCase , '''set_processor''' ):
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
module.set_processor(__UpperCAmelCase )
else:
module.set_processor(processor.pop(F"""{name}.processor""" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
if self.use_slicing and x.shape[0] > 1:
__lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )]
__lowerCamelCase = torch.cat(__UpperCAmelCase )
else:
__lowerCamelCase = self.encoder(__UpperCAmelCase )
__lowerCamelCase = self.quant_conv(__UpperCAmelCase )
__lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
__lowerCamelCase = self.post_quant_conv(__UpperCAmelCase )
__lowerCamelCase = self.decoder(__UpperCAmelCase )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
@apply_forward_hook
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
if self.use_slicing and z.shape[0] > 1:
__lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )]
__lowerCamelCase = torch.cat(__UpperCAmelCase )
else:
__lowerCamelCase = self._decode(__UpperCAmelCase ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=__UpperCAmelCase )
    def lowerCamelCase ( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[2] , b.shape[2] , blend_extent )
        for y in range(blend_extent ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def lowerCamelCase ( self , a , b , blend_extent ):
        '''simple docstring'''
        blend_extent = min(a.shape[3] , b.shape[3] , blend_extent )
        for x in range(blend_extent ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
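    # Blend illustration: row/column y of the overlap is weighted
    # a * (1 - y / blend_extent) + b * (y / blend_extent), so with
    # blend_extent=4 the first tile contributes 1, 3/4, 2/4, 1/4 while the
    # second ramps up 0, 1/4, 2/4, 3/4 -- a linear cross-fade over the seam.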
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
__lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
__lowerCamelCase = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__lowerCamelCase = []
for i in range(0 , x.shape[2] , __UpperCAmelCase ):
__lowerCamelCase = []
for j in range(0 , x.shape[3] , __UpperCAmelCase ):
__lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__lowerCamelCase = self.encoder(__UpperCAmelCase )
__lowerCamelCase = self.quant_conv(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
__lowerCamelCase = []
for i, row in enumerate(__UpperCAmelCase ):
__lowerCamelCase = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
__lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
__lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 )
__lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
'''simple docstring'''
__lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
__lowerCamelCase = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__lowerCamelCase = []
for i in range(0 , z.shape[2] , __UpperCAmelCase ):
__lowerCamelCase = []
for j in range(0 , z.shape[3] , __UpperCAmelCase ):
__lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__lowerCamelCase = self.post_quant_conv(__UpperCAmelCase )
__lowerCamelCase = self.decoder(__UpperCAmelCase )
row.append(__UpperCAmelCase )
rows.append(__UpperCAmelCase )
__lowerCamelCase = []
for i, row in enumerate(__UpperCAmelCase ):
__lowerCamelCase = []
for j, tile in enumerate(__UpperCAmelCase ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
if j > 0:
__lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
__lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ):
'''simple docstring'''
__lowerCamelCase = sample
__lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist
if sample_posterior:
__lowerCamelCase = posterior.sample(generator=__UpperCAmelCase )
else:
__lowerCamelCase = posterior.mode()
__lowerCamelCase = self.decode(__UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
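# Hedged usage sketch, assuming the class above is diffusers' AutoencoderKL:
#
#     vae = AutoencoderKL()                         # defaults from @register_to_config
#     posterior = vae.encode(images).latent_dist    # DiagonalGaussianDistribution
#     latents = posterior.sample()
#     reconstruction = vae.decode(latents).sample   # back to pixel space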
| 330 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class _a ( lowerCAmelCase__ ):
__a : List[str] = 42
__a : Dict = 42
__a : Union[str, Any] = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 34 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
a_ = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
a_ = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
a_ = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
a_ = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
a_ = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key ( k ,patterns ):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name ,hf_name )
    return k
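# Illustration of the rename above: each (tf_name, hf_name) pair is applied in
# order, so e.g. '/' becomes '.', 'layer_' becomes 'layers.', 'kernel' becomes
# 'weight', and the attention sub-names map onto the PyTorch module names
# before the key is looked up in the target state dict.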
def convert_bigbird_pegasus ( tf_weights : dict ,config_update : dict ):
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}
    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
    for k, v in tqdm(decoder_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    for k, v in tqdm(remaining_weights.items() ,'''tf -> hf conversion''' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k ,patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
    mapping['''model.encoder.embed_positions.weight'''] = mapping['''model.embed_positions.weight''']
    mapping['''model.decoder.embed_positions.weight'''] = mapping.pop('''model.embed_positions.weight''' )
    missing, extra = torch_model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            '''final_logits_bias''',
            '''model.encoder.embed_tokens.weight''',
            '''model.decoder.embed_tokens.weight''',
            '''lm_head.weight''',
        ]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy ( path : str ):
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['''global_step''']
    for name, shape in tqdm(init_vars ,desc='''converting tf checkpoint to dict''' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch ( ckpt_path : str ,save_dir : str ,config_update : dict ):
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights ,config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
a_ = parser.parse_args()
a_ = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
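    # Example invocation (paths are hypothetical; the script name is assumed
    # from the usual transformers naming for this converter):
    #   python convert_bigbird_pegasus_tf_to_pytorch.py \
    #       --tf_ckpt_path /path/to/bigbird_pegasus_tf_ckpt \
    #       --save_dir ./bigbird_pegasus_converted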
| 330 | 0 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 305 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ = logging.get_logger(__name__)
class __lowerCAmelCase :
    def __init__( self , text = None , conversation_id = None , past_user_inputs=None , generated_responses=None ):
        '''simple docstring'''
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
def __eq__( self , __UpperCAmelCase ):
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def lowerCamelCase ( self , text , overwrite = False ):
        '''simple docstring'''
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    F"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text
def lowerCamelCase ( self ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
self.generated_responses.append(__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self ):
'''simple docstring'''
        output = F"""Conversation id: {self.uuid} \n"""
        for is_user, text in self.iter_texts():
            name = '''user''' if is_user else '''bot'''
output += F"""{name} >> {text} \n"""
return output
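# Hedged usage sketch, assuming the container above is exported as `Conversation`:
#
#     conversation = Conversation('''Going to the movies tonight - any suggestions?''')
#     conversation.mark_processed()                  # move the new input into history
#     conversation.append_response('''The Big Lebowski''')
#     conversation.add_user_input('''Is it an action movie?''')
#     print(conversation)                            # "user >> ..." / "bot >> ..." transcript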
@add_end_docstrings(
lowerCAmelCase__ , r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
if self.tokenizer.pad_token_id is None:
__lowerCamelCase = self.tokenizer.eos_token
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = {}
__lowerCamelCase = {}
__lowerCamelCase = {}
if min_length_for_response is not None:
__lowerCamelCase = min_length_for_response
if minimum_tokens is not None:
__lowerCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
__lowerCamelCase = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__lowerCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(__UpperCAmelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self , __UpperCAmelCase , __UpperCAmelCase=0 , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = super().__call__(__UpperCAmelCase , num_workers=__UpperCAmelCase , **__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) == 1:
return outputs[0]
return outputs
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=32 ):
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' )
if conversation.new_user_input is None:
raise ValueError(
F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
__lowerCamelCase = self.tokenizer._build_conversation_input_ids(__UpperCAmelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__lowerCamelCase = self._legacy_parse_and_tokenize(__UpperCAmelCase )
if self.framework == "pt":
__lowerCamelCase = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__lowerCamelCase = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=10 , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length )
__lowerCamelCase = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__lowerCamelCase = max_length - minimum_tokens
__lowerCamelCase = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
__lowerCamelCase = model_inputs['''attention_mask'''][:, -trim:]
__lowerCamelCase = model_inputs.pop('''conversation''' )
__lowerCamelCase = max_length
__lowerCamelCase = self.model.generate(**__UpperCAmelCase , **__UpperCAmelCase )
if self.model.config.is_encoder_decoder:
__lowerCamelCase = 1
else:
__lowerCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=True ):
'''simple docstring'''
__lowerCamelCase = model_outputs['''output_ids''']
__lowerCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase , )
__lowerCamelCase = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(__UpperCAmelCase )
return conversation
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer.eos_token_id
__lowerCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) )
if len(__UpperCAmelCase ) > self.tokenizer.model_max_length:
__lowerCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 330 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCAmelCase :str = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : int , *_A : Tuple , **_A : Optional[int] ) -> List[str]:
super().__init__(*_A , **_A )
requires_backends(self , 'decord' )
self.check_model_type(_A )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Optional[int]=None ) -> int:
__magic_name__ : int = {}
if frame_sampling_rate is not None:
__magic_name__ : Union[str, Any] = frame_sampling_rate
if num_frames is not None:
__magic_name__ : Optional[int] = num_frames
__magic_name__ : int = {}
if top_k is not None:
__magic_name__ : Union[str, Any] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Union[str, Any] , _A : Union[str, List[str]] , **_A : int ) -> Optional[int]:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any]=None , _A : Union[str, Any]=1 ) -> Union[str, Any]:
if num_frames is None:
__magic_name__ : List[str] = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
__magic_name__ : Dict = BytesIO(requests.get(_A ).content )
__magic_name__ : List[Any] = VideoReader(_A )
videoreader.seek(0 )
__magic_name__ : Any = 0
__magic_name__ : int = num_frames * frame_sampling_rate - 1
        __magic_name__ : Optional[Any] = np.linspace(_A , _A , num=_A , dtype=np.int64 )
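        # Illustration: with num_frames=8 and frame_sampling_rate=4, end_idx is
        # 31, so np.linspace picks 8 roughly evenly spaced indices from the
        # first 32 frames (about every 4th frame) for batch decoding below.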
__magic_name__ : List[Any] = videoreader.get_batch(_A ).asnumpy()
__magic_name__ : List[str] = list(_A )
__magic_name__ : Union[str, Any] = self.image_processor(_A , return_tensors=self.framework )
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Dict ) -> List[str]:
__magic_name__ : Union[str, Any] = self.model(**_A )
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] , _A : Optional[Any]=5 ) -> Tuple:
if top_k > self.model.config.num_labels:
__magic_name__ : int = self.model.config.num_labels
if self.framework == "pt":
__magic_name__ : List[str] = model_outputs.logits.softmax(-1 )[0]
__magic_name__ , __magic_name__ : Tuple = probs.topk(_A )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Tuple = scores.tolist()
__magic_name__ : Optional[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_A , _A )] | 331 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any:
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]:
__magic_name__ : Union[str, Any] = {}
__magic_name__ : Optional[Any] = {}
if prompt is not None:
__magic_name__ : Union[str, Any] = prompt
if generate_kwargs is not None:
__magic_name__ : str = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__magic_name__ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__magic_name__ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
__magic_name__ : List[Any] = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__magic_name__ : Any = self.model.config.model_type
if model_type == "git":
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids
__magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__magic_name__ : int = None
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any:
    # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
    # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__magic_name__ : str = None
if generate_kwargs is None:
__magic_name__ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name )
__magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A )
return model_outputs
def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = []
for output_ids in model_outputs:
__magic_name__ : Union[str, Any] = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
        return records
| 331 | 1 |
'''simple docstring'''
import os
import sys
import unittest
lowerCAmelCase :Dict = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCAmelCase :List[Any] = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCAmelCase :Dict = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : Optional[int] = get_test_to_tester_mapping(_A )
__magic_name__ : List[Any] = get_test_to_tester_mapping(_A )
__magic_name__ : Union[str, Any] = {'BertModelTest': 'BertModelTester'}
__magic_name__ : Dict = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def __lowerCAmelCase ( self : Tuple ) -> Dict:
__magic_name__ : Dict = get_model_to_test_mapping(_A )
__magic_name__ : List[str] = get_model_to_test_mapping(_A )
__magic_name__ : Optional[Any] = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
__magic_name__ : str = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
self.assertEqual(get_test_info.to_json(_A ) , _A )
def __lowerCAmelCase ( self : Dict ) -> Dict:
__magic_name__ : Optional[Any] = get_model_to_tester_mapping(_A )
__magic_name__ : Optional[Any] = get_model_to_tester_mapping(_A )
__magic_name__ : Optional[Any] = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
__magic_name__ : List[Any] = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(_A ) , _A )
        self.assertEqual(get_test_info.to_json(_A ) , _A )
| 331 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase :Dict = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCAmelCase :str = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase :Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys())
lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
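        # Example: 10_000 training samples with train_batch_size=32,
        # accumulate_grad_batches=2 and 2 GPUs give an effective batch of 128,
        # so this returns 10_000 / 128 * max_epochs = 78.125 * max_epochs steps.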
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
    if args.fp16:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
    return trainer
| 331 | 1 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase :List[Any] = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : str = ["""input_features"""]
    def __init__( self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs ) -> Optional[Any]:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
def __lowerCAmelCase ( self : Optional[int] , _A : np.array ) -> np.ndarray:
        log_spec = spectrogram(
            _A , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec , log_spec.max() - 8.0 )
        log_spec = (log_spec + 4.0) / 4.0
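        # The two lines above floor the log10 spectrogram at 8 units below its
        # peak and rescale with (x + 4.0) / 4.0, so (when the peak is near 0)
        # values land roughly in [-1, 1] -- Whisper's input normalization.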
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowerCAmelCase ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
            attention_mask = np.array(attention_mask , np.int32 )
__magic_name__ : Optional[int] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__magic_name__ : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
__magic_name__ : Optional[int] = padding_value
normed_input_values.append(_A )
else:
__magic_name__ : List[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : str , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
__magic_name__ : Optional[Any] = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
__magic_name__ : int = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
__magic_name__ : str = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
__magic_name__ : List[str] = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
__magic_name__ : Union[str, Any] = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
        __magic_name__ : int = [self._np_extract_fbank_features(waveform ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
            __magic_name__ : Optional[int] = [np.asarray(feature , dtype=np.float32 ) for feature in input_features]
else:
__magic_name__ : List[Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
__magic_name__ : Tuple = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
__magic_name__ : Dict = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __lowerCAmelCase ( self : List[Any] ) -> Dict[str, Any]:
__magic_name__ : Tuple = copy.deepcopy(self.__dict__ )
__magic_name__ : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output | 331 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
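        # Baseline DDPM config for the tests below; individual tests override fields via **kwargs
        # before the scheduler is constructed.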
__magic_name__ : str = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
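        # DDPMScheduler defines __len__ as num_train_timesteps, so this walks the full
        # 1000-step reverse process with the default config.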
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A ) | 331 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : List[Any] , _A : Optional[int]=13 , _A : int=7 , _A : Union[str, Any]=True , _A : Optional[Any]=True , _A : Tuple=True , _A : int=99 , _A : Tuple=32 , _A : Optional[int]=5 , _A : Union[str, Any]=4 , _A : str=37 , _A : Tuple="gelu" , _A : List[Any]=0.1 , _A : str=0.1 , _A : List[str]=512 , _A : List[str]=16 , _A : Optional[Any]=2 , _A : int=0.02 , _A : List[Any]=3 , _A : Optional[Any]=4 , _A : str=None , ) -> List[Any]:
__magic_name__ : str = parent
__magic_name__ : Dict = batch_size
__magic_name__ : Tuple = seq_length
__magic_name__ : Dict = is_training
__magic_name__ : str = use_token_type_ids
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Any = vocab_size
__magic_name__ : Optional[Any] = hidden_size
__magic_name__ : Tuple = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : Optional[int] = hidden_act
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : Dict = attention_probs_dropout_prob
__magic_name__ : str = max_position_embeddings
__magic_name__ : List[Any] = type_vocab_size
__magic_name__ : Optional[int] = type_sequence_label_size
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[Any] = num_labels
__magic_name__ : Any = num_choices
__magic_name__ : str = scope
__magic_name__ : Optional[int] = self.vocab_size - 1
def __lowerCAmelCase ( self : Any ) -> Tuple:
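        # Build random token ids (and optional labels) plus a tiny OpenAIGPTConfig sized by the
        # tester's hyperparameters, for shape-only checks.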
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Union[str, Any] = None
if self.use_token_type_ids:
__magic_name__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Tuple = None
__magic_name__ : int = None
__magic_name__ : Dict = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Tuple = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__magic_name__ : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCAmelCase ( self : List[Any] , _A : List[Any] , _A : str , _A : Any , _A : Union[str, Any] , *_A : Tuple ) -> int:
__magic_name__ : Tuple = OpenAIGPTModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Dict = model(_A , token_type_ids=_A , head_mask=_A )
__magic_name__ : str = model(_A , token_type_ids=_A )
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : Any , _A : Dict , _A : Optional[Any] , *_A : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[int] = OpenAIGPTLMHeadModel(_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : List[str] , _A : Tuple , _A : int , _A : Dict , _A : List[str] , *_A : Dict ) -> str:
__magic_name__ : Tuple = OpenAIGPTDoubleHeadsModel(_A )
model.to(_A )
model.eval()
__magic_name__ : Dict = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Dict , _A : List[Any] , _A : Optional[Any] , _A : Optional[Any] , *_A : Dict ) -> Optional[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Optional[Any] = OpenAIGPTForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : List[str] = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int ) -> Dict:
__magic_name__ : List[str] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) : Union[str, Any] = config_and_inputs
__magic_name__ : List[str] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Any = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
A_ : str = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
A_ : Optional[Any] = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self : int , _A : Optional[int] , _A : Tuple , _A : int , _A : int , _A : Union[str, Any] ) -> Union[str, Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[str] , _A : List[str] , _A : Optional[int]=False ) -> List[str]:
__magic_name__ : List[Any] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__magic_name__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_A , )
__magic_name__ : List[str] = inputs_dict['labels']
__magic_name__ : Optional[Any] = inputs_dict['labels']
__magic_name__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_A , )
__magic_name__ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
__magic_name__ : Optional[int] = OpenAIGPTModelTester(self )
__magic_name__ : Dict = ConfigTester(self , config_class=_A , n_embd=37 )
def __lowerCAmelCase ( self : Any ) -> str:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_A )
def __lowerCAmelCase ( self : Dict ) -> str:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_A )
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Dict = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(_A )
__magic_name__ : Dict = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=_A ) # the president is
__magic_name__ : Any = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__magic_name__ : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A ) | 331 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = IFInpaintingPipeline
A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
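        # Assumption based on the branch below: per-device torch.Generator objects have not been
        # supported on mps, so a global manual_seed is used as a fallback there.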
if str(_A ).startswith('mps' ):
__magic_name__ : Optional[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , ) | 331 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : bool = True , lowerCAmelCase : float = math.inf , lowerCAmelCase : float = -math.inf , lowerCAmelCase : float = math.inf , lowerCAmelCase : float = -math.inf , lowerCAmelCase : bool = False , lowerCAmelCase : float = 100 , lowerCAmelCase : float = 0.01 , lowerCAmelCase : float = 1 , ):
"""simple docstring"""
__magic_name__ : List[str] = False
__magic_name__ : Tuple = search_prob
__magic_name__ : str = start_temperate
__magic_name__ : Tuple = []
__magic_name__ : Optional[int] = 0
__magic_name__ : Dict = None
while not search_end:
__magic_name__ : Union[str, Any] = current_state.score()
if best_state is None or current_score > best_state.score():
__magic_name__ : Any = current_state
scores.append(lowerCAmelCase )
iterations += 1
__magic_name__ : Any = None
__magic_name__ : List[str] = current_state.get_neighbors()
while (
next_state is None and neighbors
        ):  # until we find a neighbor that we can move to
__magic_name__ : Tuple = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
__magic_name__ : int = neighbors.pop(lowerCAmelCase )
__magic_name__ : List[Any] = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
__magic_name__ : Union[str, Any] = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
__magic_name__ : List[Any] = picked_neighbor
else:
__magic_name__ : List[Any] = (math.e) ** (
change / current_temp
) # probability generation function
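                # Metropolis-style acceptance: a worse move is taken with probability
                # exp(change / temperature), so exploration shrinks as the temperature decays.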
if random.random() < probability: # random number within probability
__magic_name__ : Any = picked_neighbor
__magic_name__ : Optional[int] = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
__magic_name__ : List[Any] = True
else:
__magic_name__ : str = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel('Iterations' )
plt.ylabel('Function values' )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : int ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase :int = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase :List[str] = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase :Union[str, Any] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase :List[Any] = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
        F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Tuple ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
lowerCAmelCase :Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase :Tuple = simulated_annealing(prob, find_max=False, visualization=True)
print(
        '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
)
lowerCAmelCase :str = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase :Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
        '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
) | 331 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) : int = config_and_inputs
__magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' ) | 331 | 1 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCAmelCase :Union[str, Any] = datasets.utils.logging.get_logger(__name__)
@dataclass
class _lowerCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
A_ : int = 1_00_00
A_ : Optional[List[str]] = None
A_ : Optional[datasets.Features] = None
class _lowerCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
A_ : Dict = ParquetConfig
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] ) -> Optional[int]:
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' )
__magic_name__ : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_A , (str, list, tuple) ):
__magic_name__ : str = data_files
if isinstance(_A , _A ):
__magic_name__ : Optional[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__magic_name__ : List[str] = [dl_manager.iter_files(_A ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
__magic_name__ : Dict = []
for split_name, files in data_files.items():
if isinstance(_A , _A ):
__magic_name__ : Optional[int] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__magic_name__ : Dict = [dl_manager.iter_files(_A ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_A ):
with open(_A , 'rb' ) as f:
__magic_name__ : List[Any] = datasets.Features.from_arrow_schema(pq.read_schema(_A ) )
break
splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'files': files} ) )
return splits
def __lowerCAmelCase ( self : Union[str, Any] , _A : pa.Table ) -> pa.Table:
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__magic_name__ : str = table_cast(_A , self.info.features.arrow_schema )
return pa_table
def __lowerCAmelCase ( self : Any , _A : str ) -> int:
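        # Stream each parquet file in Arrow record batches of self.config.batch_size rows,
        # yielding one table per batch keyed by file and batch index.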
__magic_name__ : List[str] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' )
for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ):
with open(_A , 'rb' ) as f:
__magic_name__ : Optional[int] = pq.ParquetFile(_A )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
__magic_name__ : Optional[Any] = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F'{file_idx}_{batch_idx}', self._cast_table(_A )
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(_A )}: {e}' )
raise | 331 |
'''simple docstring'''
class _lowerCamelCase : # Public class to implement a graph
    '''Count the connected islands of 1-cells in a boolean grid using 8-directional depth-first search.'''
def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
__magic_name__ : Tuple = row
__magic_name__ : str = col
__magic_name__ : Optional[Any] = graph
def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
# Checking all 8 elements surrounding nth element
__magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
__magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1]
__magic_name__ : Optional[int] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A )
def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands.
__magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )]
__magic_name__ : Any = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(_A , _A , _A )
count += 1
return count | 331 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : "DiagonalGaussianDistribution"
class _lowerCamelCase ( lowercase__ , lowercase__ ):
'''simple docstring'''
A_ : Any = True
@register_to_config
def __init__( self : Dict , _A : int = 3 , _A : int = 3 , _A : Tuple[str] = ("DownEncoderBlock2D",) , _A : Tuple[str] = ("UpDecoderBlock2D",) , _A : Tuple[int] = (64,) , _A : int = 1 , _A : str = "silu" , _A : int = 4 , _A : int = 32 , _A : int = 32 , _A : float = 0.1_8215 , ) -> Dict:
super().__init__()
# pass init params to Encoder
__magic_name__ : str = Encoder(
in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , )
# pass init params to Decoder
__magic_name__ : Union[str, Any] = Decoder(
in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , norm_num_groups=_A , act_fn=_A , )
        __magic_name__ : List[str] = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        __magic_name__ : int = nn.Conv2d(_A , _A , 1 )
__magic_name__ : List[str] = False
__magic_name__ : Tuple = False
# only relevant if vae tiling is enabled
__magic_name__ : Optional[int] = self.config.sample_size
__magic_name__ : List[str] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__magic_name__ : Optional[Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__magic_name__ : Optional[int] = 0.25
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : List[Any]=False ) -> Any:
if isinstance(_A , (Encoder, Decoder) ):
__magic_name__ : Union[str, Any] = value
def __lowerCAmelCase ( self : Union[str, Any] , _A : bool = True ) -> List[Any]:
__magic_name__ : int = use_tiling
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self.enable_tiling(_A )
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : Optional[Any] = True
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __lowerCAmelCase ( self : List[str] ) -> Dict[str, AttentionProcessor]:
__magic_name__ : int = {}
def fn_recursive_add_processors(_A : str , _A : torch.nn.Module , _A : Dict[str, AttentionProcessor] ):
if hasattr(_A , 'set_processor' ):
__magic_name__ : str = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F'{name}.{sub_name}' , _A , _A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_A , _A , _A )
return processors
def __lowerCAmelCase ( self : Dict , _A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Tuple:
__magic_name__ : Dict = len(self.attn_processors.keys() )
if isinstance(_A , _A ) and len(_A ) != count:
raise ValueError(
F'A dict of processors was passed, but the number of processors {len(_A )} does not match the'
F' number of attention layers: {count}. Please make sure to pass {count} processor classes.' )
def fn_recursive_attn_processor(_A : str , _A : torch.nn.Module , _A : List[Any] ):
if hasattr(_A , 'set_processor' ):
if not isinstance(_A , _A ):
module.set_processor(_A )
else:
module.set_processor(processor.pop(F'{name}.processor' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F'{name}.{sub_name}' , _A , _A )
for name, module in self.named_children():
fn_recursive_attn_processor(_A , _A , _A )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __lowerCAmelCase ( self : List[str] , _A : torch.FloatTensor , _A : bool = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_A , return_dict=_A )
if self.use_slicing and x.shape[0] > 1:
__magic_name__ : Union[str, Any] = [self.encoder(_A ) for x_slice in x.split(1 )]
__magic_name__ : Union[str, Any] = torch.cat(_A )
else:
__magic_name__ : List[str] = self.encoder(_A )
__magic_name__ : Tuple = self.quant_conv(_A )
__magic_name__ : Any = DiagonalGaussianDistribution(_A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_A )
def __lowerCAmelCase ( self : Union[str, Any] , _A : torch.FloatTensor , _A : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_A , return_dict=_A )
__magic_name__ : Any = self.post_quant_conv(_A )
__magic_name__ : str = self.decoder(_A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
@apply_forward_hook
def __lowerCAmelCase ( self : List[Any] , _A : torch.FloatTensor , _A : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
__magic_name__ : List[Any] = [self._decode(_A ).sample for z_slice in z.split(1 )]
__magic_name__ : str = torch.cat(_A )
else:
__magic_name__ : List[str] = self._decode(_A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_A )
def __lowerCAmelCase ( self : List[Any] , _A : str , _A : int , _A : Optional[Any] ) -> List[Any]:
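        # Blend the bottom rows of tile `a` into the top rows of tile `b` with a linear ramp
        # over the blend extent (vertical seam smoothing between decoded tiles).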
__magic_name__ : Optional[Any] = min(a.shape[2] , b.shape[2] , _A )
for y in range(_A ):
__magic_name__ : int = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : str ) -> Dict:
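        # Same linear ramp along the width: blend the right edge of `a` into the left edge
        # of `b` (horizontal seam smoothing between decoded tiles).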
__magic_name__ : Optional[Any] = min(a.shape[3] , b.shape[3] , _A )
for x in range(_A ):
__magic_name__ : Optional[Any] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __lowerCAmelCase ( self : Optional[Any] , _A : torch.FloatTensor , _A : bool = True ) -> AutoencoderKLOutput:
__magic_name__ : int = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__magic_name__ : Dict = int(self.tile_latent_min_size * self.tile_overlap_factor )
__magic_name__ : Tuple = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__magic_name__ : List[Any] = []
for i in range(0 , x.shape[2] , _A ):
__magic_name__ : int = []
for j in range(0 , x.shape[3] , _A ):
__magic_name__ : str = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__magic_name__ : List[Any] = self.encoder(_A )
__magic_name__ : Union[str, Any] = self.quant_conv(_A )
row.append(_A )
rows.append(_A )
__magic_name__ : Union[str, Any] = []
for i, row in enumerate(_A ):
__magic_name__ : List[Any] = []
for j, tile in enumerate(_A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__magic_name__ : Dict = self.blend_v(rows[i - 1][j] , _A , _A )
if j > 0:
__magic_name__ : int = self.blend_h(row[j - 1] , _A , _A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_A , dim=3 ) )
__magic_name__ : Optional[int] = torch.cat(_A , dim=2 )
__magic_name__ : List[str] = DiagonalGaussianDistribution(_A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_A )
def __lowerCAmelCase ( self : Tuple , _A : torch.FloatTensor , _A : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
__magic_name__ : List[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__magic_name__ : Any = int(self.tile_sample_min_size * self.tile_overlap_factor )
__magic_name__ : List[Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__magic_name__ : Tuple = []
for i in range(0 , z.shape[2] , _A ):
__magic_name__ : List[Any] = []
for j in range(0 , z.shape[3] , _A ):
__magic_name__ : Optional[Any] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__magic_name__ : Optional[int] = self.post_quant_conv(_A )
__magic_name__ : List[str] = self.decoder(_A )
row.append(_A )
rows.append(_A )
__magic_name__ : Dict = []
for i, row in enumerate(_A ):
__magic_name__ : List[Any] = []
for j, tile in enumerate(_A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__magic_name__ : str = self.blend_v(rows[i - 1][j] , _A , _A )
if j > 0:
__magic_name__ : Tuple = self.blend_h(row[j - 1] , _A , _A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_A , dim=3 ) )
__magic_name__ : Optional[Any] = torch.cat(_A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A )
def __lowerCAmelCase ( self : List[Any] , _A : torch.FloatTensor , _A : bool = False , _A : bool = True , _A : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
__magic_name__ : Union[str, Any] = sample
__magic_name__ : Tuple = self.encode(_A ).latent_dist
if sample_posterior:
__magic_name__ : Optional[Any] = posterior.sample(generator=_A )
else:
__magic_name__ : Optional[Any] = posterior.mode()
__magic_name__ : List[str] = self.decode(_A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_A ) | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
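# Lazy import structure: optional backends (sentencepiece, tokenizers) are imported only when
# available, and submodules are resolved on first attribute access via _LazyModule.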
lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase :str = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowerCAmelCase :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ):
"""simple docstring"""
__magic_name__ : list[int] = [0]
__magic_name__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__magic_name__ : int = 0
# the area corresponding to the grid that gives the product closest to target
__magic_name__ : int = 0
# an estimate of b, using the quadratic formula
__magic_name__ : float
# the largest integer less than b_estimate
__magic_name__ : int
    # the smallest integer greater than b_estimate
__magic_name__ : int
# the triangle number corresponding to b_floor
__magic_name__ : int
# the triangle number corresponding to b_ceil
__magic_name__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
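        # For a fixed triangle_a, solve triangle_a * b * (b + 1) / 2 = target for b via the
        # quadratic formula, then test the two nearest integers.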
__magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__magic_name__ : List[Any] = floor(lowerCAmelCase )
__magic_name__ : Dict = ceil(lowerCAmelCase )
__magic_name__ : Any = triangle_numbers[b_floor]
__magic_name__ : Optional[int] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : Any = triangle_b_first_guess * triangle_a
__magic_name__ : Any = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : List[str] = triangle_b_second_guess * triangle_a
__magic_name__ : Optional[int] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]:
__magic_name__ : str = {}
if "candidate_labels" in kwargs:
__magic_name__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__magic_name__ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int:
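        # CLIP-style zero-shot setup: encode the image once and pair it with one hypothesis
        # sentence (the template filled with each candidate label).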
__magic_name__ : Dict = load_image(_A )
__magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
__magic_name__ : Optional[Any] = candidate_labels
__magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels]
__magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__magic_name__ : Optional[Any] = [text_inputs]
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str:
__magic_name__ : str = model_inputs.pop('candidate_labels' )
__magic_name__ : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__magic_name__ : Dict = text_inputs[0]
else:
# Batching case.
__magic_name__ : Optional[Any] = text_inputs[0][0]
__magic_name__ : List[Any] = self.model(**_A , **_A )
__magic_name__ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]:
__magic_name__ : Tuple = model_outputs.pop('candidate_labels' )
__magic_name__ : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
__magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
__magic_name__ : Tuple = probs.tolist()
if not isinstance(_A , _A ):
__magic_name__ : Any = [scores]
elif self.framework == "tf":
__magic_name__ : Any = stable_softmax(_A , axis=-1 )
__magic_name__ : Dict = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Union[str, Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] )
]
return result | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
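# Note: the mapping built above is handed to _LazyModule at the bottom of this
# file, so each submodule is only imported on first attribute access; the
# TYPE_CHECKING branch below gives static type checkers the eager imports.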
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 331 | 1 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
rename_keys_prefix = [
('''bert.bert''', '''visual_bert'''),
('''bert.cls''', '''cls'''),
('''bert.classifier''', '''cls'''),
('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
('''position_embeddings_visual''', '''visual_position_embeddings'''),
('''projection''', '''visual_projection'''),
]
ACCEPTABLE_CHECKPOINTS = [
'''nlvr2_coco_pre_trained.th''',
'''nlvr2_fine_tuned.th''',
'''nlvr2_pre_trained.th''',
'''vcr_coco_pre_train.th''',
'''vcr_fine_tune.th''',
'''vcr_pre_train.th''',
'''vqa_coco_pre_trained.th''',
'''vqa_fine_tuned.th''',
'''vqa_pre_trained.th''',
]
def load_state_dict ( checkpoint_path : str ):
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location='cpu' )
    return sd
def get_new_dict ( d , config , rename_keys_prefix=rename_keys_prefix ):
    """simple docstring"""
    new_d = OrderedDict()
    # assumption: the buffer key below is what the upstream script registers here
    new_d['visual_bert.embeddings.position_ids'] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['cls.predictions.decoder.bias'] = new_d['cls.predictions.bias']
    return new_d
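# Illustrative effect of the renaming loop above (prefix pairs taken from the
# table at the top of this file):
#   "bert.bert.embeddings.word_embeddings.weight"
#       -> "visual_bert.embeddings.word_embeddings.weight"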
@torch.no_grad()
def convert_visual_bert_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str ):
    """simple docstring"""
    assert (
        checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1024}
        else:
            raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 512}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2048, 'num_labels': 3129}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1024,
                'num_labels': 2,
            }
            model_type = 'nlvr'
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''orig_checkpoint_path''', type=str, help='''A path to .th on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path) | 331 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( BaseImageProcessor ):
'''simple docstring'''
    model_input_names = ["""pixel_values"""]
    def __init__( self : Dict , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : int , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , crop_pct : float , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Tuple , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
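    # Worked example of the branch above: with size={"shortest_edge": 224} and
    # the default crop_pct of 224/256, the image is first resized so its short
    # side is int(224 / 0.875) = 256, then center-cropped to 224 x 224 -- the
    # classic "resize then crop" ImageNet evaluation recipe.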
    def rescale( self : int , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : List[Any] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : Optional[Any] , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : str , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 331 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCAmelCase :Tuple = 1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s
lowerCAmelCase :Union[str, Any] = 3E8 # unit of c : m * s^-1
def lowerCamelCase ( force : float , area : float , distance : float ):
    """simple docstring"""
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
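# Illustrative usage (not in the original module): pass 0 for exactly one of
# the three quantities and the function solves for it, returning a one-entry dict.
if __name__ == "__main__":
    # Casimir force between two 4 m^2 plates separated by 1 micrometre.
    print(lowerCamelCase(force=0, area=4.0, distance=1e-6))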
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model : Tuple ):
    """simple docstring"""
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir : List[Any] , metric : int ):
    """simple docstring"""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric : Optional[int] , patience : Optional[Any] ):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
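# Usage sketch (illustrative): both factories plug straight into a
# pytorch_lightning Trainer, e.g.
#   trainer = pl.Trainer(
#       callbacks=[
#           get_checkpoint_callback(output_dir, "rouge2"),
#           get_early_stopping_callback("rouge2", patience=3),
#       ]
#   )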
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
    def on_batch_end( self : List[str] , trainer : Optional[Any] , pl_module : List[str] ) -> int:
        lrs = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
    def _write_logs( self : Any , trainer : pl.Trainer , pl_module : pl.LightningModule , type_path : str , save_generations : Dict=True ) -> None:
        logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
@rank_zero_only
    def on_train_start( self : List[str] , trainer : Union[str, Any] , pl_module : Tuple ) -> Tuple:
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
    def on_test_end( self : Union[str, Any] , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> List[Any]:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
@rank_zero_only
    def on_validation_end( self : Tuple , trainer : pl.Trainer , pl_module : Any ) -> List[Any]:
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 331 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand():
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
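# The index trick above maps the boolean sum (play >= oppo) + (play > oppo)
# to 0, 1 or 2, i.e. "Loss", "Tie" or "Win" for indices drawn from the
# strength-ordered SORTED_HANDS tuple.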
def generate_random_hands(number_of_hands: int = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
assert answer == 376 | 331 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests ( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self : Union[str, Any] ) -> Tuple:
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
        tokenizer.model_max_length = 77
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs( self : Union[str, Any] , device : List[str] , seed : str=0 ) -> Union[str, Any]:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
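    # The fixed-seed generator above is what makes the tiny pipeline
    # deterministic, so the hard-coded expected_slice arrays in the tests below
    # can be compared against fresh outputs within a 1e-2 tolerance.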
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self : int ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
__magic_name__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ : List[str] = self.get_dummy_components()
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__magic_name__ : Optional[Any] = RobertaSeriesModelWithTransformation(_A )
__magic_name__ : Optional[Any] = text_encoder
__magic_name__ : Union[str, Any] = AltDiffusionPipeline(**_A )
__magic_name__ : Optional[int] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Optional[Any] = self.get_dummy_inputs(_A )
__magic_name__ : Dict = 'A photo of an astronaut'
__magic_name__ : Dict = alt_pipe(**_A )
__magic_name__ : Optional[Any] = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Any = np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ : str = self.get_dummy_components()
__magic_name__ : List[str] = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
__magic_name__ : Optional[int] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__magic_name__ : List[str] = RobertaSeriesModelWithTransformation(_A )
__magic_name__ : List[Any] = text_encoder
__magic_name__ : Any = AltDiffusionPipeline(**_A )
__magic_name__ : Optional[Any] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Optional[int] = self.get_dummy_inputs(_A )
__magic_name__ : Dict = alt_pipe(**_A )
__magic_name__ : int = output.images
__magic_name__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ : Optional[int] = np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : Any ) -> List[str]:
# make sure here that pndm scheduler skips prk
__magic_name__ : Tuple = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=_A )
__magic_name__ : Any = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : List[str] = 'A painting of a squirrel eating a burger'
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
__magic_name__ : str = alt_pipe([prompt] , generator=_A , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
__magic_name__ : List[Any] = output.images
__magic_name__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__magic_name__ : Dict = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : int = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
__magic_name__ : Dict = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=_A , safety_checker=_A )
__magic_name__ : List[Any] = alt_pipe.to(_A )
alt_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Optional[Any] = 'A painting of a squirrel eating a burger'
__magic_name__ : int = torch.manual_seed(0 )
__magic_name__ : Optional[Any] = alt_pipe([prompt] , generator=_A , num_inference_steps=2 , output_type='numpy' )
__magic_name__ : List[str] = output.images
__magic_name__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__magic_name__ : List[Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char ( cp : int ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def is_chinese ( word : str ):
    """simple docstring"""
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word ( tokens : List[str] ):
    """simple docstring"""
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol ( bert_tokens : List[str] , chinese_word_set : set ):
    """simple docstring"""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start, end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            l = min(end - start , max_word_len )
            for i in range(l , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
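# Illustrative behaviour of add_sub_symbol (hypothetical inputs): given BERT
# tokens ['我', '喜', '欢'] and the LTP-derived word set {'喜欢'}, the second
# character of the matched word gets marked, yielding ['我', '喜', '##欢'],
# so whole-word masking can mask the word as one unit.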
def prepare_ref ( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    """simple docstring"""
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main ( args ):
    """simple docstring"""
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp ) # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
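# Example invocation (illustrative script name and paths, matching the flag
# defaults declared below):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt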
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
main(args) | 331 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _lowerCamelCase ( Pipeline ):
    '''simple docstring'''

    def __init__( self : Optional[Any] , **kwargs : Union[str, Any] ) -> Tuple:
        super().__init__(**kwargs )
        requires_backends(self , 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__( self : Optional[int] , images : Union[str, List[str], "Image", List["Image"]] , **kwargs : Dict ) -> Dict:
        return super().__call__(images , **kwargs )

    def _sanitize_parameters( self : Any , **kwargs : Dict ) -> Optional[int]:
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess( self : str , image : Dict , candidate_labels : Optional[Any]=None , hypothesis_template : int="This is a photo of {}." ) -> int:
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward( self : Union[str, Any] , model_inputs : Tuple ) -> str:
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**model_inputs , **text_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def postprocess( self : Optional[int] , model_outputs : Optional[Any] ) -> Optional[int]:
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result | 331 | 1 |
'''simple docstring'''
def dodecahedron_surface_area ( edge : float ):
    """simple docstring"""
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError('Length must be a positive.' )
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume ( edge : float ):
    """simple docstring"""
    if edge <= 0 or not isinstance(edge , (int, float) ):
        raise ValueError('Length must be a positive.' )
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
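# Illustrative check (not in the original file): for a unit edge the formulas
# give ~20.6457 for the surface area and ~7.6631 for the volume.
if __name__ == "__main__":
    print(dodecahedron_surface_area(1.0))
    print(dodecahedron_volume(1.0))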
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase :int = '''pt'''
elif is_tf_available():
lowerCAmelCase :Optional[Any] = '''tf'''
else:
lowerCAmelCase :Optional[Any] = '''jax'''
class _lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp( self : List[str] ) -> Optional[Any]:
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self : Union[str, Any] ) -> List[str]:
        return ByTaTokenizer.from_pretrained('google/byt5-small' )
    def get_tokenizer( self : Tuple , **kwargs : Optional[int] ) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self : Optional[int] , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def __lowerCAmelCase ( self : int ) -> str:
__magic_name__ : Any = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : Optional[int] = self.ta_base_tokenizer
__magic_name__ : Optional[int] = 'Unicode €.'
__magic_name__ : Optional[Any] = tokenizer(_A )
__magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : Any = tokenizer.decode(_A )
self.assertEqual(_A , 'Unicode €.</s>' )
__magic_name__ : Any = tokenizer('e è é ê ë' )
__magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : List[str] = tokenizer.decode(_A )
self.assertEqual(_A , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
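    # Note: ByT5 ids are raw UTF-8 bytes offset by the three leading special
    # tokens (pad=0, eos=1, unk=2), e.g. "U" (byte 85) encodes to 88 in the
    # expected id lists above.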
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : List[Any] = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
__magic_name__ : str = list(batch.input_ids.numpy()[0] )
else:
__magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('decoder_input_ids' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Union[str, Any] = self.ta_base_tokenizer
__magic_name__ : Tuple = [
'Summary of the text.',
'Another summary.',
]
__magic_name__ : Dict = tokenizer(
text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : Any = ['A long paragraph for summarization. </s>']
__magic_name__ : List[str] = ['Summary of the text. </s>']
# fmt: off
__magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__magic_name__ : str = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['input_ids'][0] )
self.assertEqual(_A , batch['labels'][0] )
def __lowerCAmelCase ( self : Any ) -> str:
# safety check on max_len default value so we are sure the test works
__magic_name__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
__magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir )
with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
special_tokens_map = json.load(json_file )
with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
tokenizer_config = json.load(json_file )
added_tokens_extra_ids = [F'<extra_id_{i}>' for i in range(125 )]
special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
'an_additional_special_token'
]
tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(special_tokens_map , outfile )
with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(tokenizer_config , outfile )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
tmp_dir , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=False )]
tokenizer = tokenizer_class.from_pretrained(
tmp_dir , additional_special_tokens=new_added_tokens , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(tmp_dir )
tokenizer = tokenizer_class.from_pretrained(tmp_dir )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> int:
pass
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> int:
pass
def __lowerCAmelCase ( self : str ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
tokenizers = self.get_tokenizers(fast=False , do_lower_case=True )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
string = tokenizer.convert_tokens_to_string(tokens )
self.assertIsInstance(string , str )
def __lowerCAmelCase ( self : Any ) -> Tuple:
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
token_id_to_test_setters = 0
token_to_test_setters = tokenizer.convert_ids_to_tokens(
token_id_to_test_setters , skip_special_tokens=False )
for attr in attributes_list:
setattr(tokenizer , attr + '_id' , None )
self.assertEqual(getattr(tokenizer , attr ) , None )
self.assertEqual(getattr(tokenizer , attr + '_id' ) , None )
setattr(tokenizer , attr + '_id' , token_id_to_test_setters )
self.assertEqual(getattr(tokenizer , attr ) , token_to_test_setters )
self.assertEqual(getattr(tokenizer , attr + '_id' ) , token_id_to_test_setters )
setattr(tokenizer , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids' ) , [] )
setattr(tokenizer , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
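# --- Illustrative sketch, not part of the original test file ---
# The expected id lists above follow ByT5's byte-level scheme: every UTF-8 byte is
# shifted by 3 to leave room for the special tokens <pad>=0, </s>=1 and <unk>=2.
# `byte_ids` is a hypothetical helper used only to verify that assumption:
def byte_ids(text: str, offset: int = 3, eos_id: int = 1) -> list:
    ids = [b + offset for b in text.encode('utf-8')]  # shift each raw byte
    return ids + [eos_id]  # append </s>

assert byte_ids('A long paragraph for summarization. ')[:3] == [68, 35, 111]
assert byte_ids('Summary of the text. ')[-1] == 1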
'''simple docstring'''
def solution ( n : int = 1000 ):
"""simple docstring"""
product = -1
candidate = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
b = (n * n - 2 * a * n) // (2 * n - 2 * a)
c = n - a - b
if c * c == (a * a + b * b):
candidate = a * b * c
if candidate >= product:
product = candidate
return product
if __name__ == "__main__":
print(F'{solution() = }')
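# --- Illustrative sketch, not part of the original file ---
# An O(n^2) brute force over the same search space, handy for sanity-checking the
# closed-form step above (b comes from eliminating c in a^2 + b^2 = c^2, a + b + c = n).
# `brute_force` is a hypothetical helper; for n = 1000 both searches should return
# 31875000 (the a=200, b=375, c=425 triplet).
def brute_force(n: int = 1000) -> int:
    best = -1
    for a in range(1, n):
        for b in range(a, n - a):
            c = n - a - b
            if a * a + b * b == c * c:
                best = max(best, a * b * c)
    return best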
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
token_ids = [[1, 2, 4], [1, 2, 3, 4]]
dc = DisjunctiveConstraint(token_ids )
self.assertTrue(isinstance(dc.token_ids , list ) )
with self.assertRaises(ValueError ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(ValueError ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
cset = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(ValueError ):
DisjunctiveConstraint(cset ) # fails here
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
constraint = [[1, 2, 3], [1, 2, 4]]
dc = DisjunctiveConstraint(constraint )
stepped , completed , reset = dc.update(1 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
stepped , completed , reset = dc.update(2 )
desired = stepped is True and completed is False and reset is False
self.assertTrue(desired )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
stepped , completed , reset = dc.update(3 )
desired = stepped is True and completed is True and reset is False
self.assertTrue(desired )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
constraint = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
dc = DisjunctiveConstraint(constraint )
stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
stepped , completed , reset = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
stepped , completed , reset = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
stepped , completed , reset = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
stepped , completed , reset = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
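# --- Illustrative sketch, not part of the original test file ---
# In real use a DisjunctiveConstraint is handed to `model.generate` through the
# `constraints` argument, where constrained beam search drives the `update` calls
# tested above. The checkpoint name and token ids below are placeholders.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

def generate_with_constraint(prompt: str, candidate_ids: list) -> str:
    tokenizer = AutoTokenizer.from_pretrained('t5-small')  # placeholder checkpoint
    model = AutoModelForSeq2SeqLM.from_pretrained('t5-small')
    constraint = DisjunctiveConstraint(candidate_ids)  # e.g. [[1, 2, 4], [1, 2, 3, 4]]
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids
    # constrained decoding requires beam search (num_beams > 1)
    output = model.generate(input_ids, constraints=[constraint], num_beams=4, max_new_tokens=32)
    return tokenizer.decode(output[0], skip_special_tokens=True)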
'''simple docstring'''
def lowerCamelCase ( input_num : int ):
"""simple docstring"""
if not isinstance(input_num , int ):
raise ValueError('Input must be an integer' )
if input_num <= 0:
raise ValueError('Input must be positive' )
return sum(
divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
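# --- Illustrative sketch, not part of the original file ---
# The function above sums proper divisors only (it stops at input_num // 2), so a
# perfect number is one that equals its own divisor sum. `is_perfect` is a
# hypothetical helper built on top of it:
def is_perfect(number: int) -> bool:
    return lowerCamelCase(number) == number

assert is_perfect(28)        # 1 + 2 + 4 + 7 + 14 == 28
assert not is_perfect(27)    # 1 + 3 + 9 == 13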
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( readme_md : str , expected_dict : Optional[int] ):
"""simple docstring"""
assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( readme_md : Optional[int] , expected_error : Dict ):
"""simple docstring"""
with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
readme = ReadMe.from_string(readme_md , example_yaml_structure )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : List[str] , expected_error : Optional[int] ):
"""simple docstring"""
with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : Tuple ):
"""simple docstring"""
ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( readme_md : Optional[Any] , expected_dict : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir ) / 'README.md'
with open(path , 'w+' ) as readme_file:
readme_file.write(readme_md )
out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( readme_md : Tuple , expected_error : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir ) / 'README.md'
with open(path , 'w+' ) as readme_file:
readme_file.write(readme_md )
expected_error = expected_error.format(path=path )
with pytest.raises(ValueError , match=re.escape(expected_error ) ):
readme = ReadMe.from_readme(path , example_yaml_structure )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : int , expected_error : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir ) / 'README.md'
with open(path , 'w+' ) as readme_file:
readme_file.write(readme_md )
expected_error = expected_error.format(path=path )
with pytest.raises(ValueError , match=re.escape(expected_error ) ):
ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir ) / 'README.md'
with open(path , 'w+' ) as readme_file:
readme_file.write(readme_md )
ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
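# --- Illustrative sketch, not part of the original test file ---
# Happy-path usage of the validator exercised above: parse a card against the
# structure spec and collect the "-\t" issue lines from the ValueError message.
# `check_card` is a hypothetical helper.
def check_card(readme_md: str) -> list:
    try:
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
        return []
    except ValueError as err:
        return [line for line in str(err).splitlines() if line.startswith('-')]

# check_card(README_CORRECT) should be []; check_card(README_MISSING_TEXT) should
# list the two "Dataset Summary" issues.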
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pipeline_class = IFInpaintingPipeline
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
def get_dummy_components( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def get_dummy_inputs( self , device , seed=0 ) -> List[Any]:
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
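# --- Illustrative sketch, not part of the original test file ---
# The seeding pattern used by get_dummy_inputs above, shown standalone: a fixed
# seed must produce bit-identical dummy tensors so pipeline outputs can be
# compared across runs.
import torch

def seeded_noise(seed: int = 0, device: str = 'cpu') -> torch.Tensor:
    generator = torch.Generator(device=device).manual_seed(seed)
    return torch.randn((1, 3, 32, 32), generator=generator, device=device)

assert torch.equal(seeded_noise(0), seeded_noise(0))      # same seed, same tensor
assert not torch.equal(seeded_noise(0), seeded_noise(1))  # different seed differs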
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
'''simple docstring'''
def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int:
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 384
self.num_hidden_layers = 2
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = 'gelu'
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.embedding_size = 128
self.head_ratio = 2
self.conv_kernel_size = 9
self.num_groups = 1
self.scope = None
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : int , _A : Union[str, Any] ) -> Any:
__magic_name__ : Dict = TFConvBertModel(config=_A )
__magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__magic_name__ : Any = [input_ids, input_mask]
__magic_name__ : Tuple = model(_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]:
__magic_name__ : Dict = TFConvBertForMaskedLM(config=_A )
__magic_name__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple:
__magic_name__ : Any = self.num_labels
__magic_name__ : str = TFConvBertForSequenceClassification(config=_A )
__magic_name__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
__magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
__magic_name__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int:
__magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A )
__magic_name__ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
test_pruning = False
test_headmasking = False
test_onnx = False
def __lowerCAmelCase ( self : List[Any] ) -> int:
self.model_tester = TFConvBertModelTester(self )
self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
def __lowerCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
def __lowerCAmelCase ( self : int ) -> Any:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config , 'use_cache' ):
config.use_cache = True
encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
num_out = len(model(class_inputs_dict ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname , saved_model=True )
saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1' )
model = tf.keras.models.load_model(saved_model_dir )
outputs = model(class_inputs_dict )
if self.is_encoder_decoder:
output_hidden_states = outputs['encoder_hidden_states']
output_attentions = outputs['encoder_attentions']
else:
output_hidden_states = outputs['hidden_states']
output_attentions = outputs['attentions']
self.assertEqual(len(outputs ) , num_out )
expected_num_layers = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(output_hidden_states ) , expected_num_layers )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(model )
def __lowerCAmelCase ( self : List[str] ) -> Any:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
decoder_key_length = getattr(self.model_tester , 'decoder_key_length' , decoder_seq_length )
encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
def check_decoder_attentions_output(outputs ):
out_len = len(outputs )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ : Any = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(outputs ):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
inputs_dict['output_attentions'] = True
config.output_hidden_states = False
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
out_len = len(outputs )
self.assertEqual(config.output_hidden_states , False )
check_encoder_attentions_output(outputs )
if self.is_encoder_decoder:
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(config.output_hidden_states , False )
check_decoder_attentions_output(outputs )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(config.output_hidden_states , False )
check_encoder_attentions_output(outputs )
# Check attention is always last and order is fine
inputs_dict['output_attentions'] = True
config.output_hidden_states = True
model = model_class(config )
outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
self.assertEqual(model.config.output_hidden_states , True )
check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : int ) -> int:
model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
expected_shape = [1, 6, 768]
self.assertEqual(output.shape , expected_shape )
expected_slice = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
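# --- Illustrative sketch, not part of the original test file ---
# The integration test above uses the "expected slice" pattern: run the released
# checkpoint once, record a tiny stable corner of the output, then assert against
# it with a loose tolerance. A generic numpy version of that check:
import numpy as np

def assert_slice_close(output, expected_slice, atol: float = 1e-4) -> None:
    actual = np.asarray(output)[:, :3, :3]  # compare only the recorded corner
    expected = np.asarray(expected_slice)
    if not np.allclose(actual, expected, atol=atol):
        raise AssertionError(f'max abs diff {np.abs(actual - expected).max():.2e} exceeds {atol}')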
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase :List[Any] = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever ( RagRetriever ):
'''simple docstring'''
def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ) -> None:
super().__init__(
config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
self.process_group = None
def init_retrieval( self , distributed_port : int ) -> None:
logger.info('initializing retrieval' )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info('dist initialized' )
# needs to be set manually
os.environ['GLOO_SOCKET_IFNAME'] = self._infer_socket_ifname()
# avoid clash with the NCCL port
os.environ['MASTER_PORT'] = str(distributed_port + 1 )
self.process_group = dist.new_group(ranks=None , backend='gloo' )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info('dist not initialized / main' )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def _is_main( self ) -> bool:
return dist.get_rank(group=self.process_group ) == 0
def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ) -> torch.Tensor:
target_tensor = torch.empty(target_shape , dtype=target_type )
dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
return target_tensor
def _infer_socket_ifname( self ) -> Any:
addrs = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
ifname = next((addr for addr in addrs if addr.startswith('e' )) , None )
return ifname
def retrieve( self , question_hidden_states : np.ndarray , n_docs : int ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
# distributed training
world_size = dist.get_world_size(group=self.process_group )
# gather logic
gather_list = None
if self._is_main():
gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
# scatter logic
n_queries = question_hidden_states.shape[0]
scatter_ids = []
scatter_vectors = []
if self._is_main():
assert len(gather_list ) == world_size
ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
scatter_ids = self._chunk_tensor(ids , n_queries )
scatter_vectors = self._chunk_tensor(vectors , n_queries )
doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
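# --- Illustrative sketch, not part of the original file ---
# The scatter path above depends on splitting gathered retrieval results back into
# one equally sized chunk per rank. A minimal single-process rendering of that
# chunking (what a `_chunk_tensor`-style helper has to do):
import torch

def chunk_tensor(t: torch.Tensor, chunk_size: int) -> list:
    # one chunk of `chunk_size` rows per worker, in rank order
    return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]

gathered = torch.arange(12).reshape(6, 2)  # e.g. 3 workers x 2 queries each
per_rank = chunk_tensor(gathered, 2)
assert len(per_rank) == 3 and per_rank[0].shape == (2, 2)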
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase :Dict = pytest.mark.integration
@require_faiss
class IndexableDatasetTest ( TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
import faiss
dset : Dataset = self._create_dummy_dataset()
dset = dset.map(
lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def __lowerCAmelCase ( self : Any ) -> str:
import faiss
dset : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Tuple ) -> int:
import faiss
dset : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
dset : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
from elasticsearch import Elasticsearch
dset : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
mocked_index_create.return_value = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
es_client = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=es_client )
scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest ( TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
query = np.zeros(5 , dtype=np.float32 )
query[1] = 1
scores , indices = index.search(query )
self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
queries = np.eye(5 , dtype=np.float32 )[::-1]
total_scores , total_indices = index.search_batch(queries )
self.assertRaises(ValueError , index.search_batch , queries[0] )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import faiss
index = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
index = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(ValueError ):
index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
import faiss
custom_index = faiss.IndexFlat(5 )
index = FaissIndex(custom_index=custom_index )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
index.save(tmp_file.name )
index = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
query = np.zeros(5 , dtype=np.float32 )
query[1] = 1
scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase ( mockfs : Tuple ):
"""simple docstring"""
import faiss
index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.float32 ) )
index_name = 'index.faiss'
path = f'mock://{index_name}'
index.save(path , storage_options=mockfs.storage_options )
index = FaissIndex.load(path , storage_options=mockfs.storage_options )
query = np.zeros(5 , dtype=np.float32 )
query[1] = 1
scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest ( TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> Dict:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
es_client = Elasticsearch()
mocked_index_create.return_value = {'acknowledged': True}
index = ElasticSearchIndex(es_client=es_client )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
query = 'foo'
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
scores , indices = index.search(query )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
query = 'foo'
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
scores , indices = index.search(query , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
queries = ['foo', 'bar', 'foobar']
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
total_scores , total_indices = index.search_batch(queries )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
# batched queries with timeout
queries = ['foo', 'bar', 'foobar']
mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
best_scores = [scores[0] for scores in total_scores]
best_indices = [indices[0] for indices in total_indices]
self.assertGreater(np.min(best_scores ) , 0 )
self.assertListEqual([1, 1, 1] , best_indices )
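# --- Illustrative sketch, not part of the original test file ---
# The core FAISS pattern the tests exercise, standalone: an exact inner-product
# index over float32 vectors, queried with a 2D batch (FAISS expects 2D input).
import faiss
import numpy as np

vectors = np.eye(5, dtype=np.float32)  # five one-hot "documents"
index = faiss.IndexFlatIP(5)           # exact inner-product index
index.add(vectors)
scores, ids = index.search(np.ones((1, 5), dtype=np.float32), 3)
# every document has inner product 1.0 with the all-ones query
assert scores.shape == (1, 3) and np.allclose(scores, 1.0)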
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest ( SchedulerCommonTest ):
'''simple docstring'''
scheduler_classes = (DDIMParallelScheduler,)
forward_default_kwargs = (("""eta""", 0.0), ("""num_inference_steps""", 50))
def get_scheduler_config( self , **kwargs ) -> Optional[Any]:
config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**kwargs )
return config
def full_loop( self , **config_kwargs ) -> Union[str, Any]:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config_kwargs )
scheduler = scheduler_class(**scheduler_config )
num_inference_steps , eta = 10, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps )
for t in scheduler.timesteps:
residual = model(sample , t )
sample = scheduler.step(residual , t , sample , eta ).prev_sample
return sample
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_A )
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1 )
scheduler = scheduler_class(**scheduler_config )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def __lowerCAmelCase ( self : int ) -> str:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : str ) -> int:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_A )
def __lowerCAmelCase ( self : Dict ) -> Dict:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_A )
def __lowerCAmelCase ( self : Any ) -> Dict:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Dict ) -> Any:
for t in [1, 10, 49]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_A , num_inference_steps=_A )
def __lowerCAmelCase ( self : str ) -> Tuple:
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_A , eta=_A )
def __lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
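# For orientation, a minimal standalone denoising loop with this scheduler. The
# tensor shapes and the random "noise prediction" below are illustrative
# stand-ins for a real UNet call, not part of the test file above.
import torch
from diffusers import DDIMParallelScheduler

parallel_scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
parallel_scheduler.set_timesteps(10)
latent = torch.randn(1, 3, 8, 8)
for t in parallel_scheduler.timesteps:
    noise_pred = torch.randn_like(latent)  # stand-in for model(latent, t)
    latent = parallel_scheduler.step(noise_pred, t, latent, eta=0.0).prev_sample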
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can add your own by"
            " extending this function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
'''simple docstring'''
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
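# Hypothetical wiring of the helpers above into a Lightning trainer; the output
# directory, metric, patience and epoch values are illustrative, not from this file.
callbacks = [
    get_checkpoint_callback(output_dir="out", metric="bleu"),
    get_early_stopping_callback(metric="bleu", patience=3),
    Seq2SeqLoggingCallback(),
]
trainer = pl.Trainer(callbacks=callbacks, max_epochs=3)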
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
'''simple docstring'''
def __init__( self : str , _A : List[str] , _A : Optional[Any]=13 , _A : List[Any]=32 , _A : Union[str, Any]=3 , _A : int=4 , _A : Optional[Any]=[10, 20, 30, 40] , _A : Optional[Any]=[2, 2, 3, 2] , _A : int=True , _A : Optional[int]=True , _A : Union[str, Any]=37 , _A : Optional[int]="gelu" , _A : Any=10 , _A : List[str]=0.02 , _A : List[Any]=["stage2", "stage3", "stage4"] , _A : List[Any]=3 , _A : str=None , ) -> Union[str, Any]:
__magic_name__ : Dict = parent
__magic_name__ : Union[str, Any] = batch_size
__magic_name__ : Tuple = image_size
__magic_name__ : Optional[int] = num_channels
__magic_name__ : int = num_stages
__magic_name__ : Tuple = hidden_sizes
__magic_name__ : List[Any] = depths
__magic_name__ : List[Any] = is_training
__magic_name__ : List[str] = use_labels
__magic_name__ : Dict = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : Union[str, Any] = out_features
__magic_name__ : List[Any] = num_labels
__magic_name__ : Any = scope
__magic_name__ : Any = num_stages
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Dict ) -> Tuple:
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=_A , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=_A , loss_ignore_index=255 , num_labels=self.num_labels , )
def __lowerCAmelCase ( self : Optional[int] , _A : int , _A : Any , _A : List[str] ) -> List[str]:
__magic_name__ : Tuple = UperNetForSemanticSegmentation(config=_A )
model.to(_A )
model.eval()
__magic_name__ : str = model(_A )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
A_ : List[str] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
A_ : Optional[Any] = {"""image-segmentation""": UperNetForSemanticSegmentation} if is_torch_available() else {}
A_ : List[Any] = False
A_ : List[Any] = False
A_ : int = False
A_ : Optional[Any] = False
A_ : Optional[int] = False
A_ : str = False
def __lowerCAmelCase ( self : Any ) -> List[str]:
__magic_name__ : List[str] = UperNetModelTester(self )
__magic_name__ : str = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
return
def __lowerCAmelCase ( self : Any ) -> Dict:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = model_class(_A )
__magic_name__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def __lowerCAmelCase ( self : Any ) -> str:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __lowerCAmelCase ( self : Dict ) -> Dict:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowerCAmelCase ( self : Any ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self : str ) -> Optional[int]:
def check_hidden_states_output(_A : int , _A : Union[str, Any] , _A : int ):
__magic_name__ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
__magic_name__ : Dict = model(**self._prepare_for_class(_A , _A ) )
__magic_name__ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__ : str = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : str = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : List[Any] = True
check_hidden_states_output(_A , _A , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Any = _config_zero_init(_A )
__magic_name__ : str = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
__magic_name__ : Optional[Any] = model_class(config=_A )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip(reason='UperNet does not have tied weights' )
def __lowerCAmelCase ( self : int ) -> List[str]:
pass
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[Any] = UperNetForSemanticSegmentation.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
"""simple docstring"""
__magic_name__ : List[str] = hf_hub_download(
repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
__magic_name__ : List[str] = Image.open(lowerCAmelCase ).convert('RGB' )
return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
__magic_name__ : Optional[int] = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
__magic_name__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(_A )
__magic_name__ : Union[str, Any] = prepare_img()
__magic_name__ : List[Any] = processor(images=_A , return_tensors='pt' ).to(_A )
with torch.no_grad():
__magic_name__ : List[str] = model(**_A )
__magic_name__ : Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _A )
__magic_name__ : Optional[int] = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : int = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
__magic_name__ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(_A )
__magic_name__ : Optional[int] = prepare_img()
__magic_name__ : List[str] = processor(images=_A , return_tensors='pt' ).to(_A )
with torch.no_grad():
__magic_name__ : Any = model(**_A )
__magic_name__ : int = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , _A )
__magic_name__ : int = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(_A )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1E-4 ) )
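# End-to-end inference with the same checkpoint the integration test exercises;
# the input is the ADE20k sample returned by prepare_img() above.
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

seg_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
seg_model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
seg_inputs = seg_processor(images=prepare_img(), return_tensors="pt")
seg_logits = seg_model(**seg_inputs).logits  # (batch, num_labels, height, width)
seg_map = seg_logits.argmax(dim=1)           # per-pixel class ids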
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Count the ways n pence can be made from standard UK coins (Project Euler 31)."""
    return two_pound(n)
if __name__ == "__main__":
    print(solution(int(input().strip())))
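# The mutually recursive helpers above recompute overlapping subproblems; a
# bottom-up table gives the same count in O(target * coins) time. The function
# name and this standalone check are my own additions, not part of the file.
def count_coin_combinations(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target  # ways[v] = combinations summing to v pence
    for coin in coins:
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]


assert count_coin_combinations(200) == 73682  # the well-known Project Euler 31 answer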
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
'''simple docstring'''
def __init__( self : List[Any] , _A : Dict , _A : int=13 , _A : Any=30 , _A : List[str]=2 , _A : Any=3 , _A : List[Any]=True , _A : Tuple=True , _A : Union[str, Any]=32 , _A : Tuple=2 , _A : str=4 , _A : Optional[int]=37 , _A : Optional[int]="gelu" , _A : Dict=0.1 , _A : Union[str, Any]=0.1 , _A : str=10 , _A : Dict=0.02 , _A : Optional[int]=3 , _A : Any=None , ) -> Tuple:
__magic_name__ : List[str] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = image_size
__magic_name__ : List[Any] = patch_size
__magic_name__ : Optional[Any] = num_channels
__magic_name__ : List[Any] = is_training
__magic_name__ : Tuple = use_labels
__magic_name__ : Tuple = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : int = num_attention_heads
__magic_name__ : int = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : Dict = type_sequence_label_size
__magic_name__ : str = initializer_range
__magic_name__ : List[str] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ : Optional[int] = (image_size // patch_size) ** 2
__magic_name__ : Any = num_patches + 1
def __lowerCAmelCase ( self : Tuple ) -> Any:
__magic_name__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[int] = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Any ) -> Dict:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Dict , _A : List[str] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = TFViTModel(config=_A )
__magic_name__ : Optional[Any] = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
__magic_name__ : str = self.image_size // 2
__magic_name__ : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
__magic_name__ : Optional[Any] = model(_A , interpolate_pos_encoding=_A , training=_A )
__magic_name__ : Tuple = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : List[str] , _A : int ) -> int:
__magic_name__ : Optional[Any] = self.type_sequence_label_size
__magic_name__ : Tuple = TFViTForImageClassification(_A )
__magic_name__ : Dict = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
__magic_name__ : List[Any] = self.image_size // 2
__magic_name__ : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
__magic_name__ : Tuple = model(_A , interpolate_pos_encoding=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : List[Any] = 1
__magic_name__ : Any = TFViTForImageClassification(_A )
__magic_name__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
A_ : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
A_ : Any = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
A_ : Union[str, Any] = False
A_ : Optional[Any] = False
A_ : str = False
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
__magic_name__ : Optional[Any] = TFViTModelTester(self )
__magic_name__ : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowerCAmelCase ( self : List[str] ) -> Any:
pass
def __lowerCAmelCase ( self : int ) -> Optional[int]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[str] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__magic_name__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def __lowerCAmelCase ( self : Dict ) -> int:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : List[Any] = model_class(_A )
__magic_name__ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : List[Any] = [*signature.parameters.keys()]
__magic_name__ : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : List[str] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_A )
def prepare_img():
"""simple docstring"""
__magic_name__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : int ) -> Dict:
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__magic_name__ : List[str] = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
__magic_name__ : str = self.default_image_processor
__magic_name__ : Optional[Any] = prepare_img()
__magic_name__ : str = image_processor(images=_A , return_tensors='tf' )
# forward pass
__magic_name__ : Dict = model(**_A )
# verify the logits
__magic_name__ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
__magic_name__ : Optional[int] = tf.constant([-0.2744, 0.8215, -0.0836] )
        tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 )
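# A quick classification run with the same checkpoint; the input image is the
# COCO sample returned by prepare_img() above.
import tensorflow as tf
from transformers import TFViTForImageClassification, ViTImageProcessor

vit_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
vit_model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
vit_inputs = vit_processor(images=prepare_img(), return_tensors="tf")
vit_logits = vit_model(**vit_inputs).logits
predicted = vit_model.config.id2label[int(tf.argmax(vit_logits, axis=-1)[0])]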
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase(metaclass=DummyObject):
    _backends = ["flax", "transformers"]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
        requires_backends(cls , ['flax', 'transformers'] )
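# For context: placeholder classes like the ones above defer a helpful
# ImportError until first use. A rough sketch of the metaclass mechanism,
# assuming the diffusers-style requires_backends helper; this is an
# illustration, not the actual diffusers DummyObject source.
class DummyObjectSketch(type):
    def __getattr__(cls, key):
        if key.startswith("_"):
            raise AttributeError(key)
        requires_backends(cls, cls._backends)  # raises ImportError naming the missing backends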
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_A , 'tf' , 12 , **_A )
@require_torch
@slow
def __lowerCAmelCase ( self : Any ) -> Any:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(_A , 'pt' , 12 , **_A )
@require_torch
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
from transformers import BertModel
__magic_name__ : Tuple = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(_A ) )
vocab_file.flush()
__magic_name__ : int = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__magic_name__ : Tuple = BertModel(BertConfig(vocab_size=len(_A ) ) )
model.save_pretrained(_A )
self._test_export(_A , 'pt' , 12 , _A )
@require_tf
@slow
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__magic_name__ : Tuple = self._test_export(_A , 'tf' , 12 , **_A )
__magic_name__ : Union[str, Any] = quantize(Path(_A ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_A ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__magic_name__ : Union[str, Any] = self._test_export(_A , 'pt' , 12 , **_A )
__magic_name__ : List[Any] = quantize(_A )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(_A ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : str , _A : Tuple , _A : str=None , **_A : Optional[int] ) -> Union[str, Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
__magic_name__ : int = Path(_A ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(_A , _A , _A , _A , _A , **_A )
return path
except Exception as e:
self.fail(_A )
@require_torch
@require_tokenizers
@slow
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
from transformers import BertModel
__magic_name__ : str = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
__magic_name__ : int = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_A , _A , 'pt' )
@require_tf
@require_tokenizers
@slow
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
from transformers import TFBertModel
__magic_name__ : int = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
__magic_name__ : Optional[Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(_A , _A , 'tf' )
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        valid_input = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input))

        # Parameters should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input)

        # Should have exactly one arg (all before the one not provided, "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
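# Minimal export sketch using the same convert() helper the tests exercise; the
# argument order (framework, model, output path, opset, tokenizer) mirrors the
# _test_export call above. The output path is my own example.
from pathlib import Path
from transformers.convert_graph_to_onnx import convert

convert("pt", "bert-base-cased", Path("onnx/bert-base-cased.onnx"), 12, None)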
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
'''simple docstring'''
def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any:
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]:
__magic_name__ : Union[str, Any] = {}
__magic_name__ : Optional[Any] = {}
if prompt is not None:
__magic_name__ : Union[str, Any] = prompt
if generate_kwargs is not None:
__magic_name__ : str = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__magic_name__ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__magic_name__ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
__magic_name__ : List[Any] = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__magic_name__ : Any = self.model.config.model_type
if model_type == "git":
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids
__magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__magic_name__ : int = None
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any:
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__magic_name__ : str = None
if generate_kwargs is None:
__magic_name__ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name )
__magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A )
return model_outputs
def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = []
for output_ids in model_outputs:
__magic_name__ : Union[str, Any] = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
        return records
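# Typical use through the high-level pipeline API; the checkpoint name is one
# commonly used image-captioning model and is illustrative, not from this file.
from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
print(captioner("http://images.cocodataset.org/val2017/000000039769.jpg"))
# -> a list like [{'generated_text': '...caption...'}]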
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    """Split `word` into lines of width `max_width`, fully justifying each inner line."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line
            # just insert overall_spaces_count for the remainder of the line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
if __name__ == "__main__":
from doctest import testmod
    testmod()
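# A quick behavioural check (the input string is my own example): inner lines
# are fully justified, and the final line is left-justified and padded.
print(text_justification("This is an example of text justification.", 16))
# ['This    is    an', 'example  of text', 'justification.  ']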
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,              # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
            ],  # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
    @staticmethod
    def add_model_specific_args(parser, root_dir):
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
class InitCallback(pl.Callback):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback(pl.Callback):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class LoggingCallback(pl.Callback):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
if args.fp16:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
print('RAG modeling tests with new set functions successfully executed!' )
return trainer | 331 | 1 |
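# A minimal runnable sketch (not part of the original row) of the callback
# pattern above: it prints sorted validation metrics on rank zero, mirroring
# the obfuscated logging callback. Only pytorch_lightning is assumed.
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info

class MetricsPrinterCallback(pl.Callback):
    def on_validation_end(self, trainer, pl_module):
        for key in sorted(trainer.callback_metrics):
            if key not in ("log", "progress_bar"):
                rank_zero_info("{} = {}".format(key, trainer.callback_metrics[key]))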
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
_enforce_args(lowerCAmelCase , lowerCAmelCase )
if n == 0:
return 0
__magic_name__ : List[str] = float('-inf' )
for i in range(1 , n + 1 ):
__magic_name__ : List[str] = max(
lowerCAmelCase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowerCAmelCase ) )
return max_revenue
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
_enforce_args(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Optional[Any] = [float('-inf' ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list , lowerCAmelCase : list ):
"""simple docstring"""
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
__magic_name__ : List[str] = float('-inf' )
for i in range(1 , n + 1 ):
__magic_name__ : Any = max(
lowerCAmelCase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowerCAmelCase , lowerCAmelCase ) , )
__magic_name__ : Optional[Any] = max_revenue
return max_rev[n]
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
_enforce_args(lowerCAmelCase , lowerCAmelCase )
# length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
# length 0.
__magic_name__ : List[Any] = [float('-inf' ) for _ in range(n + 1 )]
__magic_name__ : Optional[Any] = 0
for i in range(1 , n + 1 ):
__magic_name__ : int = max_rev[i]
for j in range(1 , i + 1 ):
__magic_name__ : int = max(lowerCAmelCase , prices[j - 1] + max_rev[i - j] )
__magic_name__ : Optional[Any] = max_revenue_i
return max_rev[n]
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : list ):
"""simple docstring"""
if n < 0:
__magic_name__ : List[str] = f'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowerCAmelCase )
if n > len(lowerCAmelCase ):
__magic_name__ : Dict = (
'Each integral piece of rod must have a corresponding price. '
f'Got n = {n} but length of prices = {len(lowerCAmelCase )}'
)
raise ValueError(lowerCAmelCase )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = [6, 10, 12, 15, 20, 23]
__magic_name__ : Union[str, Any] = len(lowerCAmelCase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
__magic_name__ : List[Any] = 36
__magic_name__ : Optional[int] = top_down_cut_rod(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : List[str] = bottom_up_cut_rod(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Optional[int] = naive_cut_rod_recursive(lowerCAmelCase , lowerCAmelCase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main() | 331 |
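# A worked check of the three implementations above (price list assumed, after
# CLRS): with prices = [1, 5, 8, 9], a rod of length 4 is best cut into two
# pieces of length 2, so bottom_up_cut_rod(4, [1, 5, 8, 9]) returns 5 + 5 = 10,
# beating the uncut price prices[3] = 9.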
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
__magic_name__ : str = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_A , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A ) | 331 | 1 |
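# A minimal sampling-loop sketch using the public scheduler API the tests above
# exercise. The noise predictor here is a dummy zero tensor standing in for a
# trained UNet, so the output is meaningless; only the step mechanics are shown.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample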
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
# For consistency across the different places DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ : Dict = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
__magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]]
__magic_name__ : List[Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 )
__magic_name__ : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 )
__magic_name__ : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 )
__magic_name__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(_A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] ) | 331 |
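# A minimal sketch of driving the constraint directly, matching the update
# semantics asserted above. (In practice, `model.generate(force_words_ids=...)`
# with nested candidate lists builds DisjunctiveConstraints internally.)
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
dc.update(1)  # advances both branches of the trie
stepped, completed, reset = dc.update(4)
assert completed and dc.current_seq == [1, 4]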
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = IFInpaintingPipeline
A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
if str(_A ).startswith('mps' ):
__magic_name__ : Optional[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_float16(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , ) | 331 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return str(lowerCAmelCase ) == str(lowerCAmelCase )[::-1]
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return int(lowerCAmelCase ) + int(str(lowerCAmelCase )[::-1] )
def lowerCamelCase ( lowerCAmelCase : int = 1_0000 ):
"""simple docstring"""
__magic_name__ : str = []
for num in range(1 , lowerCAmelCase ):
__magic_name__ : Optional[int] = 0
__magic_name__ : Dict = num
while iterations < 50:
__magic_name__ : Union[str, Any] = sum_reverse(lowerCAmelCase )
iterations += 1
if is_palindrome(lowerCAmelCase ):
break
else:
lychrel_nums.append(lowerCAmelCase )
return len(lowerCAmelCase )
if __name__ == "__main__":
print(F'{solution() = }') | 331 |
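# A worked check of the reverse-and-add loop above: 47 -> 47 + 74 = 121, a
# palindrome after one step, so 47 is not counted; 196 never reaches a
# palindrome within the 50-iteration budget and is the classic Lychrel candidate.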
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : int = config_and_inputs  # config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
__magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' ) | 331 | 1 |
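# A usage sketch mirroring the integration test above, left commented out
# because it downloads the microsoft/deberta-base weights:
#   from transformers import AutoTokenizer, DebertaModel
#   import torch
#   tok = AutoTokenizer.from_pretrained('microsoft/deberta-base')
#   model = DebertaModel.from_pretrained('microsoft/deberta-base').eval()
#   with torch.no_grad():
#       hidden = model(**tok('Hello world', return_tensors='pt')).last_hidden_state
#   # hidden.shape -> torch.Size([1, seq_len, 768])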
'''simple docstring'''
lowerCAmelCase :Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase :Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase :Tuple = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
} | 331 |
'''simple docstring'''
class _lowerCamelCase : # Public class to implement a graph
'''simple docstring'''
def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
__magic_name__ : Tuple = row
__magic_name__ : str = col
__magic_name__ : Optional[Any] = graph
def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
# Check all 8 elements surrounding the (i, j) element
__magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # row offsets of the 8 neighbours
__magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1] # column offsets of the 8 neighbours
__magic_name__ : Optional[int] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A )
def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands.
__magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )]
__magic_name__ : Any = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(_A , _A , _A )
count += 1
return count | 331 | 1 |
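# A usage sketch for the class above (its names are obfuscated in this dump;
# `Graph` and `count_islands` below are descriptive stand-ins for the
# constructor and the final counting method):
#   grid = [[1, 1, 0, 0, 0],
#           [0, 1, 0, 0, 1],
#           [1, 0, 0, 1, 1],
#           [0, 0, 0, 0, 0],
#           [1, 0, 1, 0, 1]]
#   Graph(5, 5, grid).count_islands()  # -> 5; diagonals connect, so (2, 0)
#                                      #    joins the top-left cluster via (1, 1)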
'''simple docstring'''
lowerCAmelCase :Tuple = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def lowerCamelCase ( lowerCAmelCase : bytes ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : Dict = f'a bytes-like object is required, not \'{data.__class__.__name__}\''
raise TypeError(lowerCAmelCase )
__magic_name__ : Any = ''.join(bin(lowerCAmelCase )[2:].zfill(8 ) for byte in data )
__magic_name__ : Optional[Any] = len(lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__magic_name__ : Union[str, Any] = b'=' * ((6 - len(lowerCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(lowerCAmelCase ) % 6)
else:
__magic_name__ : Optional[int] = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( lowerCAmelCase : str ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ) and not isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : List[str] = (
'argument should be a bytes-like object or ASCII string, '
f'not \'{encoded_data.__class__.__name__}\''
)
raise TypeError(lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(lowerCAmelCase , lowerCAmelCase ):
try:
__magic_name__ : Dict = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
__magic_name__ : Union[str, Any] = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__magic_name__ : str = encoded_data[:-padding]
__magic_name__ : Optional[Any] = ''.join(
bin(B64_CHARSET.index(lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__magic_name__ : Any = ''.join(
bin(B64_CHARSET.index(lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
__magic_name__ : Dict = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(lowerCAmelCase ) , 8 )
]
return bytes(lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 |
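# A sanity check against the standard library (both functions above are renamed
# `lowerCamelCase` in this dump, so the stdlib equivalents are used here):
import base64
assert base64.b64encode(b'Hello') == b'SGVsbG8='  # 5 bytes -> one '=' of padding
assert base64.b64decode(b'SGVsbG8=') == b'Hello'  # round trip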
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
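# A minimal sketch of the lazy-import pattern used above, as a simplified
# stand-in for transformers' `_LazyModule` (an assumed simplification, not the
# real implementation): attribute access triggers the submodule import.
import importlib

class LazyModule:
    def __init__(self, pkg_name, import_structure):
        self._pkg = pkg_name
        self._map = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        try:
            submodule = self._map[name]
        except KeyError:
            raise AttributeError(name) from None
        return getattr(importlib.import_module(f'{self._pkg}.{submodule}'), name)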
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Any = """microsoft/speecht5_tts"""
A_ : Union[str, Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
A_ : List[str] = """text_reader"""
A_ : List[str] = SpeechT5Processor
A_ : List[str] = SpeechT5ForTextToSpeech
A_ : str = SpeechT5HifiGan
A_ : List[str] = ["""text"""]
A_ : Tuple = ["""audio"""]
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
if self.post_processor is None:
__magic_name__ : Optional[int] = 'microsoft/speecht5_hifigan'
super().setup()
def __lowerCAmelCase ( self : Optional[Any] , _A : List[str] , _A : int=None ) -> List[str]:
__magic_name__ : str = self.pre_processor(text=_A , return_tensors='pt' , truncation=_A )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
__magic_name__ : List[Any] = load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' )
__magic_name__ : Optional[int] = torch.tensor(embeddings_dataset[7305]['xvector'] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[Any]:
with torch.no_grad():
return self.model.generate_speech(**_A )
def __lowerCAmelCase ( self : Tuple , _A : Any ) -> Any:
with torch.no_grad():
return self.post_processor(_A ).cpu().detach() | 331 |
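# A sketch of the same pipeline this tool wraps, via the public API. Left
# commented out because it downloads the microsoft/speecht5_* checkpoints and
# the speaker-embedding dataset used in `encode` above:
#   from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
#   from datasets import load_dataset
#   import torch
#   processor = SpeechT5Processor.from_pretrained('microsoft/speecht5_tts')
#   model = SpeechT5ForTextToSpeech.from_pretrained('microsoft/speecht5_tts')
#   vocoder = SpeechT5HifiGan.from_pretrained('microsoft/speecht5_hifigan')
#   xvectors = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation')
#   speaker = torch.tensor(xvectors[7305]['xvector']).unsqueeze(0)
#   inputs = processor(text='Hello, world!', return_tensors='pt')
#   speech = model.generate_speech(inputs['input_ids'], speaker, vocoder=vocoder)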
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ):
"""simple docstring"""
__magic_name__ : list[int] = [0]
__magic_name__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__magic_name__ : int = 0
# the area corresponding to the grid that gives the product closest to target
__magic_name__ : int = 0
# an estimate of b, using the quadratic formula
__magic_name__ : float
# the largest integer less than b_estimate
__magic_name__ : int
# the largest integer less than b_estimate
__magic_name__ : int
# the triangle number corresponding to b_floor
__magic_name__ : int
# the triangle number corresponding to b_ceil
__magic_name__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__magic_name__ : List[Any] = floor(lowerCAmelCase )
__magic_name__ : Dict = ceil(lowerCAmelCase )
__magic_name__ : Any = triangle_numbers[b_floor]
__magic_name__ : Optional[int] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : Any = triangle_b_first_guess * triangle_a
__magic_name__ : Any = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : List[str] = triangle_b_second_guess * triangle_a
__magic_name__ : Optional[int] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 1 |
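# A worked check of the counting identity behind the search above: an m x n grid
# contains T(m) * T(n) rectangles with T(k) = k * (k + 1) // 2, since a rectangle
# is fixed by choosing 2 of the m + 1 vertical and 2 of the n + 1 horizontal grid
# lines. For example, a 3 x 2 grid contains T(3) * T(2) = 6 * 3 = 18 rectangles.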
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase :Dict = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCAmelCase :str = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeq2SeqLM,
'''translation''': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase :Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys())
lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
if args.fp16:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
print('RAG modeling tests with new set functions successfully executed!' )
return trainer | 331 |
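# An end-to-end wiring sketch for the file above. `add_generic_args` and
# `generic_train` are descriptive stand-ins for the two obfuscated
# `lowerCamelCase` functions, and `MyTransformer` is a hypothetical subclass
# of the base LightningModule defined above:
#   import argparse, os
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())      # --output_dir, --fp16, --seed, ...
#   MyTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args(['--model_name_or_path', 'bert-base-cased', '--do_train'])
#   trainer = generic_train(MyTransformer(args), args)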
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 331 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : torch.FloatTensor
class _lowerCamelCase ( lowercase__ , lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , _A : int = 3 , _A : int = 3 , _A : Tuple[str] = ("DownEncoderBlock2D",) , _A : Tuple[str] = ("UpDecoderBlock2D",) , _A : Tuple[int] = (64,) , _A : int = 1 , _A : str = "silu" , _A : int = 3 , _A : int = 32 , _A : int = 256 , _A : int = 32 , _A : Optional[int] = None , _A : float = 0.1_8215 , _A : str = "group" , ) -> Optional[int]:
super().__init__()
# pass init params to Encoder
__magic_name__ : int = Encoder(
in_channels=_A , out_channels=_A , down_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , double_z=_A , )
__magic_name__ : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels
__magic_name__ : int = nn.Conv2d(_A , _A , 1 )
__magic_name__ : str = VectorQuantizer(_A , _A , beta=0.25 , remap=_A , sane_index_shape=_A )
__magic_name__ : Optional[Any] = nn.Conv2d(_A , _A , 1 )
# pass init params to Decoder
__magic_name__ : int = Decoder(
in_channels=_A , out_channels=_A , up_block_types=_A , block_out_channels=_A , layers_per_block=_A , act_fn=_A , norm_num_groups=_A , norm_type=_A , )
@apply_forward_hook
    def encode( self : Optional[int] , x : torch.FloatTensor , return_dict : bool = True ) -> VQEncoderOutput:
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self : Optional[int] , h : torch.FloatTensor , force_not_quantize : bool = False , return_dict : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant2 = self.post_quant_conv(quant )
        dec = self.decoder(quant2 , quant if self.config.norm_type == 'spatial' else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self : Optional[int] , sample : torch.FloatTensor , return_dict : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        h = self.encode(sample ).latents
        _A = self.decode(h ).sample
        if not return_dict:
            return (_A,)
return DecoderOutput(sample=_A ) | 331 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
    def __init__( self : Dict , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : int , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
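    # crop_pct defaults to 224/256 = 0.875, the common ImageNet evaluation convention
    # of resizing so the shortest edge is 256 and then center-cropping to 224.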
    def resize( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , crop_pct : float , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Tuple , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
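    # e.g. size={'shortest_edge': 224} with the default crop_pct: the shortest edge is
    # first resized to int(224 / 0.875) = 256, then the image is center-cropped to 224.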
    def rescale( self : int , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ) -> int:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self : List[Any] , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : int , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self : Optional[Any] , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : str , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 331 | 1 |
'''simple docstring'''
lowerCAmelCase :List[str] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase :Union[str, Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase :Union[str, Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
} | 331 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT :Tuple = 1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s
SPEED_OF_LIGHT :Union[str, Any] = 3E8 # unit of c : m * s^-1
def lowerCamelCase ( force : float , area : float , distance : float ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
        distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 | 1 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple ( lowerCAmelCase : str ):
    """simple docstring"""
    if isinstance(lowerCAmelCase , collections.abc.Iterable ):
        return lowerCAmelCase
    return (lowerCAmelCase, lowerCAmelCase)
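# Mirrors the to_2tuple helper used by vision models: an int image/patch size is
# expanded to (size, size) so the patch-count arithmetic below works uniformly.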
@require_flax
class _lowerCamelCase :
'''simple docstring'''
def __lowerCAmelCase ( self : Any , _A : str , _A : List[Any] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : Any ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
pass
    def assert_almost_equals( self : Optional[int] , a : np.ndarray , b : np.ndarray , tol : float ) -> Optional[Any]:
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).' )
    def check_model_from_pretrained_configs( self : str , text_config : Optional[Any] , input_ids : Union[str, Any] , attention_mask : List[Any] , vision_config : Dict , pixel_values : Optional[int]=None , **_A : List[Any] ) -> List[Any]:
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = FlaxVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self : List[Any] , text_config : Dict , input_ids : List[Any] , attention_mask : Optional[int] , vision_config : int , pixel_values : Union[str, Any]=None , **_A : Any ) -> List[str]:
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self : str , text_config : Optional[Any] , input_ids : Dict , attention_mask : Union[str, Any] , vision_config : Optional[Any] , pixel_values : Any=None , **_A : List[Any] ) -> Any:
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_a = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_b = after_output[0]
            max_diff = np.amax(np.abs(out_b - out_a ) )
            self.assertLessEqual(max_diff , 1E-3 )
    def check_vision_text_output_attention( self : Union[str, Any] , text_config : int , input_ids : str , attention_mask : str , vision_config : Tuple , pixel_values : int=None , **_A : Optional[int] ) -> Union[str, Any]:
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size )
        patch_size = to_2tuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self : Union[str, Any] , pt_model : Tuple , fx_model : List[Any] , inputs_dict : List[Any] ) -> List[str]:
        pt_model.to(torch_device )
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()
        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
        fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
        pt_model_loaded.to(torch_device )
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , 'Output lengths differ between Flax and PyTorch' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4E-2 )
    def check_equivalence_pt_to_flax( self : Optional[Any] , vision_config : List[str] , text_config : Optional[Any] , inputs_dict : Tuple ) -> Union[str, Any]:
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def check_equivalence_flax_to_pt( self : str , vision_config : str , text_config : Any , inputs_dict : Optional[Any] ) -> List[str]:
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
def __lowerCAmelCase ( self : List[str] ) -> str:
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
@is_pt_flax_cross_test
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config' )
        text_config = config_inputs_dict.pop('text_config' )
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )
@slow
def __lowerCAmelCase ( self : int ) -> Optional[int]:
        model_a , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs )
        out_a = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname )
            model_b = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_b(**inputs )
            out_b = after_outputs[0]
            max_diff = np.amax(np.abs(out_b - out_a ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
    def get_pretrained_model_and_inputs( self : Any ) -> str:
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
    def get_vision_text_model( self : str , vision_config : Any , text_config : List[str] ) -> List[str]:
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self : List[str] ) -> int:
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
    def get_pretrained_model_and_inputs( self : Tuple ) -> Optional[Any]:
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
    def get_vision_text_model( self : List[Any] , vision_config : str , text_config : Any ) -> Any:
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self : str ) -> Dict:
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=image , padding=True , return_tensors='np' )
        outputs = model(**inputs )
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) ) | 331 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS :Tuple = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase :List[Any] = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase :Optional[Any] = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase :Union[str, Any] = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase :Tuple = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand ( ):
"""simple docstring"""
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def generate_random_hands ( lowerCAmelCase : int = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : Tuple , expected : Union[str, Any] ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : List[Any] , expected : Union[str, Any] ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase )
def lowerCamelCase ( hand : Any , expected : Optional[Any] , card_values : Tuple ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : Any , expected : str ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : Dict , expected : Dict ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : int , other : str , expected : Tuple ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def lowerCamelCase ( hand : int , other : Optional[Any] , expected : Any ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def lowerCamelCase ( ):
"""simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def lowerCamelCase ( ):
"""simple docstring"""
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCamelCase ( ):
"""simple docstring"""
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCamelCase ( ):
"""simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
assert answer == 376 | 331 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase :Tuple = {
'''microsoft/markuplm-base''': '''https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json''',
'''microsoft/markuplm-large''': '''https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json''',
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = """markuplm"""
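    # On top of a BERT-style text config, MarkupLM adds xpath tag/subscript embedding
    # sizes so each token can encode its position in the page's DOM tree.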
    def __init__( self : str , vocab_size : Any=30522 , hidden_size : Optional[Any]=768 , num_hidden_layers : List[Any]=12 , num_attention_heads : str=12 , intermediate_size : str=3072 , hidden_act : Tuple="gelu" , hidden_dropout_prob : int=0.1 , attention_probs_dropout_prob : int=0.1 , max_position_embeddings : Tuple=512 , type_vocab_size : str=2 , initializer_range : Dict=0.02 , layer_norm_eps : int=1E-12 , pad_token_id : int=0 , bos_token_id : int=0 , eos_token_id : Optional[int]=2 , max_xpath_tag_unit_embeddings : Dict=256 , max_xpath_subs_unit_embeddings : Dict=1024 , tag_pad_id : Any=216 , subs_pad_id : List[str]=1001 , xpath_unit_hidden_size : Dict=32 , max_depth : Optional[Any]=50 , position_embedding_type : Optional[int]="absolute" , use_cache : Dict=True , classifier_dropout : Tuple=None , **kwargs : Tuple , ) -> Any:
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
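# Only the configuration entry is registered unconditionally; each backend's model
# class is appended below when its framework is available, and _LazyModule at the
# bottom defers the actual imports until first attribute access.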
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
'''simple docstring'''
    def __init__( self : Dict , parent : Any , vocab_size : int=99 , batch_size : Dict=13 , d_model : Tuple=16 , decoder_seq_length : Union[str, Any]=7 , is_training : List[Any]=True , is_decoder : List[Any]=True , use_attention_mask : str=True , use_cache : int=False , use_labels : Optional[Any]=True , decoder_start_token_id : Any=2 , decoder_ffn_dim : Optional[int]=32 , decoder_layers : Dict=4 , decoder_attention_heads : Any=4 , max_position_embeddings : Tuple=30 , pad_token_id : int=0 , bos_token_id : Any=1 , eos_token_id : Dict=2 , scope : Optional[Any]=None , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs( self : Tuple ) -> Tuple:
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        config = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
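    # The check below exercises incremental decoding: generating with cached
    # past_key_values must match a full forward pass over the concatenated ids.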
    def create_and_check_decoder_model_past( self : str , config : Tuple , input_ids : Optional[Any] , attention_mask : Any , lm_labels : Optional[Any] , ) -> Optional[int]:
        config.use_cache = True
        model = TrOCRDecoder(config=config ).to(torch_device ).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True )
        outputs_use_cache_conf = model(input_ids )
        outputs_no_past = model(input_ids , use_cache=False )
        self.parent.assertTrue(len(outputs ) == len(outputs_use_cache_conf ) )
        self.parent.assertTrue(len(outputs ) == len(outputs_no_past ) + 1 )
        past_key_values = outputs['past_key_values']
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        output_from_no_past = model(next_input_ids )['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values )['last_hidden_state']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 )
    def prepare_config_and_inputs_for_common( self : Dict ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
A_ : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
A_ : Optional[int] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {}
A_ : List[str] = True
A_ : List[str] = False
    def setUp( self : str ) -> int:
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
pass
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
pass
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> Any:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
return
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
pass | 331 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__( self : Optional[int] , images : Union[str, List[str], "Image", List["Image"]] , **kwargs : Dict ) -> Dict:
        return super().__call__(images , **kwargs )
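    # Usage sketch (hypothetical labels): pipe(image, candidate_labels=['cat', 'dog'])
    # returns a list of {'score': ..., 'label': ...} dicts sorted by descending score.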
    def _sanitize_parameters( self : Any , **kwargs : Dict ) -> Optional[int]:
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
    def preprocess( self : str , image : Dict , candidate_labels : Optional[Any]=None , hypothesis_template : int="This is a photo of {}." ) -> int:
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
return inputs
    def _forward( self : Union[str, Any] , model_inputs : Tuple ) -> str:
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
return model_outputs
    def postprocess( self : Optional[int] , model_outputs : Optional[Any] ) -> Optional[int]:
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda _A : -_A[0] )
        ]
return result | 331 | 1 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG :Union[str, Any] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class Version :
'''simple docstring'''
A_ : str
A_ : Optional[str] = None
A_ : Optional[Union[str, int]] = None
A_ : Optional[Union[str, int]] = None
A_ : Optional[Union[str, int]] = None
    def __post_init__( self : List[str] ) -> None:
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )
def __repr__( self : List[str] ) -> Optional[int]:
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
    def tuple( self : Tuple ) -> int:
return self.major, self.minor, self.patch
    def _validate_operand( self : List[Any] , other : Tuple ) -> List[Any]:
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(F'{other} (type {type(other )}) cannot be compared to version.' )
def __eq__( self : List[Any] , _A : Any ) -> Optional[int]:
try:
__magic_name__ : Tuple = self._validate_operand(_A )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : List[Any] , _A : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : List[Any] = self._validate_operand(_A )
return self.tuple < other.tuple
def __hash__( self : int ) -> Optional[Any]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] , _A : List[Any] ) -> Union[str, Any]:
        field_names = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in _A.items() if k in field_names} )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
return self.version_str
def _str_to_version_tuple ( lowerCAmelCase : Tuple ):
    """simple docstring"""
    res = _VERSION_REG.match(lowerCAmelCase )
    if not res:
        raise ValueError(F'Invalid version \'{lowerCAmelCase}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
    return tuple(int(v ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
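# e.g. _str_to_version_tuple('1.2.3') -> (1, 2, 3); Version('1.2.3') then exposes
# .major/.minor/.patch and compares by that tuple.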
def _version_tuple_to_str ( lowerCAmelCase : Any ):
    """simple docstring"""
    return ".".join(str(v ) for v in lowerCAmelCase ) | 331 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK :int = '''pt'''
elif is_tf_available():
    FRAMEWORK :Optional[Any] = '''tf'''
else:
    FRAMEWORK :Optional[Any] = '''jax'''
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
    A_ : Tuple = ByT5Tokenizer
A_ : Dict = False
    def setUp( self : List[str] ) -> Optional[Any]:
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def ta_base_tokenizer( self : Union[str, Any] ) -> List[str]:
        return ByT5Tokenizer.from_pretrained('google/byt5-small' )
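    # ByT5 tokenizes raw UTF-8 bytes: ids 0-2 are the special tokens and each byte b
    # maps to id b + 3 (so 'U' = byte 85 becomes id 88 below); multi-byte characters
    # such as '€' therefore expand to several ids.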
    def get_tokenizer( self : Tuple , **_A : Optional[int] ) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
    def get_clean_sequence( self : Optional[int] , tokenizer : Union[str, Any] , with_prefix_space : int=False , max_length : Union[str, Any]=20 , min_length : Optional[int]=5 ) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(output_txt ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
def __lowerCAmelCase ( self : int ) -> str:
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __lowerCAmelCase ( self : int ) -> Tuple:
        tokenizer = self.ta_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , 'Unicode €.</s>' )
        encoded = tokenizer('e è é ê ë' )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , 'e è é ê ë</s>' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __lowerCAmelCase ( self : Any ) -> int:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , batch )
        self.assertIn('attention_mask' , batch )
        self.assertNotIn('decoder_input_ids' , batch )
        self.assertNotIn('decoder_attention_mask' , batch )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding='max_length' , truncation=True , return_tensors=FRAMEWORK )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text , text_target=tgt_text )
        self.assertEqual(expected_src_tokens , batch['input_ids'][0] )
        self.assertEqual(expected_tgt_tokens , batch['labels'][0] )
def __lowerCAmelCase ( self : Any ) -> str:
# safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
        tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
        tokenizer_list = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [F'<extra_id_{i}>' for i in range(125 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config , outfile )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : str = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )]
__magic_name__ : Optional[Any] = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
__magic_name__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
__magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> int:
pass
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> int:
pass
def __lowerCAmelCase ( self : str ) -> Tuple:
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
__magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__magic_name__ : int = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__magic_name__ : List[str] = 0
__magic_name__ : str = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] )
setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase :Tuple = logging.get_logger(__name__)
lowerCAmelCase :Optional[int] = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
lowerCAmelCase :Any = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCamelCase ( lowerCAmelCase : str ):
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__magic_name__ : List[Any] = model_type_to_module_name(lowerCAmelCase )
__magic_name__ : Optional[int] = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(lowerCAmelCase , lowerCAmelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCAmelCase , '__name__' , lowerCAmelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__magic_name__ : Any = importlib.import_module('transformers' )
if hasattr(lowerCAmelCase , lowerCAmelCase ):
return getattr(lowerCAmelCase , lowerCAmelCase )
return None
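# Example (added sketch): resolving a class-name string to the concrete class. With the
# mapping above, a call such as
#   image_processor_class_from_name('CLIPImageProcessor')
# lazily imports the matching transformers.models submodule and returns the class,
# or None when the name is unknown.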
def lowerCamelCase ( lowerCAmelCase : Union[str, os.PathLike] , lowerCAmelCase : Optional[Union[str, os.PathLike]] = None , lowerCAmelCase : bool = False , lowerCAmelCase : bool = False , lowerCAmelCase : Optional[Dict[str, str]] = None , lowerCAmelCase : Optional[Union[bool, str]] = None , lowerCAmelCase : Optional[str] = None , lowerCAmelCase : bool = False , **lowerCAmelCase : Tuple , ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = get_file_from_repo(
lowerCAmelCase , lowerCAmelCase , cache_dir=lowerCAmelCase , force_download=lowerCAmelCase , resume_download=lowerCAmelCase , proxies=lowerCAmelCase , use_auth_token=lowerCAmelCase , revision=lowerCAmelCase , local_files_only=lowerCAmelCase , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase )
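# Example (added sketch): reading a checkpoint's image-processor config as a plain dict.
# The repo id below is illustrative, not asserted by this file.
#   config = get_image_processor_config('google/vit-base-patch16-224')
#   size = config.get('size')  # an empty dict is returned when no config file is found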
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : int ) -> Any:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_A )
def __lowerCAmelCase ( cls : Optional[int] , _A : Optional[Any] , **_A : Dict ) -> List[str]:
__magic_name__ : int = kwargs.pop('config' , _A )
__magic_name__ : int = kwargs.pop('trust_remote_code' , _A )
__magic_name__ : Any = True
__magic_name__ , __magic_name__ : List[Any] = ImageProcessingMixin.get_image_processor_dict(_A , **_A )
__magic_name__ : Union[str, Any] = config_dict.get('image_processor_type' , _A )
__magic_name__ : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
__magic_name__ : Dict = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__magic_name__ : str = config_dict.pop('feature_extractor_type' , _A )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
__magic_name__ : Dict = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
__magic_name__ : str = config_dict['auto_map']['AutoFeatureExtractor']
__magic_name__ : Optional[Any] = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_A , _A ):
__magic_name__ : Tuple = AutoConfig.from_pretrained(_A , **_A )
            # It could be in `config.image_processor_type`
__magic_name__ : Any = getattr(_A , 'image_processor_type' , _A )
if hasattr(_A , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
__magic_name__ : Optional[Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
__magic_name__ : Any = image_processor_class_from_name(_A )
__magic_name__ : Optional[int] = image_processor_auto_map is not None
__magic_name__ : int = image_processor_class is not None or type(_A ) in IMAGE_PROCESSOR_MAPPING
__magic_name__ : Optional[int] = resolve_trust_remote_code(
_A , _A , _A , _A )
if has_remote_code and trust_remote_code:
__magic_name__ : Dict = get_class_from_dynamic_module(
_A , _A , **_A )
__magic_name__ : Tuple = kwargs.pop('code_revision' , _A )
if os.path.isdir(_A ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_A , **_A )
elif image_processor_class is not None:
return image_processor_class.from_dict(_A , **_A )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_A ) in IMAGE_PROCESSOR_MAPPING:
__magic_name__ : Optional[Any] = IMAGE_PROCESSOR_MAPPING[type(_A )]
return image_processor_class.from_dict(_A , **_A )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def __lowerCAmelCase ( _A : str , _A : Any ) -> Union[str, Any]:
        IMAGE_PROCESSOR_MAPPING.register(_A , _A )
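# Usage sketch (added): the typical entry points for the class above. The checkpoint
# name is illustrative.
#   image_processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
#   inputs = image_processor(images=image, return_tensors='pt')
# Custom pairs can also be made discoverable via
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)  # hypothetical classes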
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ : Dict = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
__magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]]
__magic_name__ : List[Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 )
__magic_name__ : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 )
__magic_name__ : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 )
__magic_name__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(_A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
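# Usage sketch (added): outside the tests, a DisjunctiveConstraint is normally passed to
# generate() for constrained beam search, where any one of the candidate phrases must
# appear in the output. Model/tokenizer names below are illustrative.
#   word_ids = tokenizer(['rain', 'raining'], add_special_tokens=False).input_ids
#   constraint = DisjunctiveConstraint(word_ids)
#   outputs = model.generate(input_ids, constraints=[constraint], num_beams=4)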
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :Any = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''XLMRobertaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[str] = ['''XLMRobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[Any] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase :List[str] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
lowerCAmelCase :List[Any] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Tuple = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Optional[Any] = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
lowerCAmelCase :Tuple = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Union[str, Any] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
lowerCAmelCase :Dict = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
lowerCAmelCase :Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
lowerCAmelCase :Any = ''''''
lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
__magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : str = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
__magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Any = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
        ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
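# Usage sketch (added): outside pytest, the same API validates an arbitrary card against
# the structure fixture above; the exact exception class is elided in this file, so
# treat it as an assumption.
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()       # raises, listing the issues, when the card is malformed
#   tree = readme.to_dict() # the parsed section tree asserted in the tests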
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None ):
"""simple docstring"""
if "." in tensor_name:
__magic_name__ : List[str] = tensor_name.split('.' )
for split in splits[:-1]:
__magic_name__ : Optional[int] = getattr(lowerCAmelCase , lowerCAmelCase )
if new_module is None:
raise ValueError(f'{module} has no attribute {split}.' )
__magic_name__ : Dict = new_module
__magic_name__ : Optional[int] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.' )
__magic_name__ : int = tensor_name in module._buffers
__magic_name__ : Dict = getattr(lowerCAmelCase , lowerCAmelCase )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put it on {device}.' )
__magic_name__ : Optional[int] = False
__magic_name__ : Optional[Any] = False
if is_buffer or not is_bitsandbytes_available():
__magic_name__ : Optional[Any] = False
__magic_name__ : List[Any] = False
else:
        __magic_name__ : Dict = hasattr(bnb.nn , 'Params4bit' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Params4bit )
        __magic_name__ : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.Int8Params )
    if is_4bit or is_8bit:
__magic_name__ : Dict = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__magic_name__ : List[str] = old_value.to(lowerCAmelCase )
elif isinstance(lowerCAmelCase , torch.Tensor ):
__magic_name__ : List[str] = value.to('cpu' )
                if value.dtype == torch.int8:
__magic_name__ : List[str] = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
                    if not is_8bit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
__magic_name__ : Optional[int] = torch.tensor(lowerCAmelCase , device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls , lowerCAmelCase ) and fp16_statistics is None:
__magic_name__ : Union[str, Any] = new_value.T
__magic_name__ : Dict = old_value.__dict__
            if is_8bit:
                __magic_name__ : Dict = bnb.nn.Int8Params(lowerCAmelCase , requires_grad=lowerCAmelCase , **lowerCAmelCase ).to(lowerCAmelCase )
            elif is_4bit:
                __magic_name__ : Optional[Any] = bnb.nn.Params4bit(lowerCAmelCase , requires_grad=lowerCAmelCase , **lowerCAmelCase ).to(lowerCAmelCase )
            __magic_name__ : Dict = new_value
            if fp16_statistics is not None:
                setattr(module.weight , 'SCB' , fp16_statistics.to(lowerCAmelCase ) )
else:
if value is None:
__magic_name__ : Any = old_value.to(lowerCAmelCase )
elif isinstance(lowerCAmelCase , torch.Tensor ):
__magic_name__ : Optional[int] = value.to(lowerCAmelCase )
else:
__magic_name__ : List[str] = torch.tensor(lowerCAmelCase , device=lowerCAmelCase )
if is_buffer:
__magic_name__ : Optional[int] = new_value
else:
__magic_name__ : List[Any] = nn.Parameter(lowerCAmelCase , requires_grad=old_value.requires_grad )
__magic_name__ : Union[str, Any] = new_value
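# Illustration (added): the dotted-name handling at the top of the function above is
# plain getattr chaining. A minimal, self-contained sketch with toy names (not real
# transformers modules):
def _dotted_traversal_demo():
    from types import SimpleNamespace

    model = SimpleNamespace(encoder=SimpleNamespace(layer0=SimpleNamespace(weight='w')))
    *parents, leaf = 'encoder.layer0.weight'.split('.')
    module = model
    for name in parents:
        module = getattr(module, name)  # walk down one attribute per dotted segment
    assert getattr(module, leaf) == 'w'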
def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str=None , lowerCAmelCase : Any=None , lowerCAmelCase : Dict=False ):
"""simple docstring"""
for name, module in model.named_children():
if current_key_name is None:
__magic_name__ : Any = []
current_key_name.append(lowerCAmelCase )
if (isinstance(lowerCAmelCase , nn.Linear ) or isinstance(lowerCAmelCase , lowerCAmelCase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '.'.join(lowerCAmelCase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ , __magic_name__ : Union[str, Any] = module.weight.shape
else:
__magic_name__ : Dict = module.in_features
__magic_name__ : Optional[Any] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
                        __magic_name__ : List[Any] = bnb.nn.Linear8bitLt(
                            lowerCAmelCase , lowerCAmelCase , module.bias is not None , has_fp16_weights=quantization_config.llm_int8_has_fp16_weight , threshold=quantization_config.llm_int8_threshold , )
__magic_name__ : Union[str, Any] = True
else:
if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
):
pass
else:
                            __magic_name__ : Optional[Any] = bnb.nn.Linear4bit(
                                lowerCAmelCase , lowerCAmelCase , module.bias is not None , quantization_config.bnb_4bit_compute_dtype , compress_statistics=quantization_config.bnb_4bit_use_double_quant , quant_type=quantization_config.bnb_4bit_quant_type , )
__magic_name__ : str = True
# Store the module class in case we need to transpose the weight later
__magic_name__ : int = type(lowerCAmelCase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowerCAmelCase )
if len(list(module.children() ) ) > 0:
__magic_name__ , __magic_name__ : List[str] = _replace_with_bnb_linear(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , has_been_replaced=lowerCAmelCase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int=None , lowerCAmelCase : Dict=None ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = ['lm_head'] if modules_to_not_convert is None else modules_to_not_convert
__magic_name__ , __magic_name__ : Dict = _replace_with_bnb_linear(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
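# Usage sketch (added): swapping nn.Linear layers for bitsandbytes equivalents.
# BitsAndBytesConfig is the real transformers config class; the exact fields set
# below are assumptions for illustration.
#   from transformers import BitsAndBytesConfig
#   bnb_config = BitsAndBytesConfig(load_in_8bit=True)
#   model = replace_with_bnb_linear(model, quantization_config=bnb_config)
# Modules named in modules_to_not_convert ('lm_head' by default) keep their
# full-precision nn.Linear.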
def lowerCamelCase ( *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[int] ):
"""simple docstring"""
warnings.warn(
'`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead' , lowerCAmelCase , )
return replace_with_bnb_linear(*lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase ( *lowerCAmelCase : Dict , **lowerCAmelCase : str ):
"""simple docstring"""
warnings.warn(
'`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead' , lowerCAmelCase , )
return set_module_quantized_tensor_to_device(*lowerCAmelCase , **lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
    __magic_name__ : Dict = deepcopy(lowerCAmelCase )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
tied_model.tie_weights()
__magic_name__ : Dict = find_tied_parameters(lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__magic_name__ : Dict = sum(lowerCAmelCase , [] )
__magic_name__ : Any = len(lowerCAmelCase ) > 0
# Check if it is a base model
__magic_name__ : Optional[Any] = not hasattr(lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__magic_name__ : str = list(model.named_children() )
__magic_name__ : List[Any] = [list_modules[-1][0]]
# add last module together with tied weights
__magic_name__ : List[str] = set(lowerCAmelCase ) - set(lowerCAmelCase )
__magic_name__ : Dict = list(set(lowerCAmelCase ) ) + list(lowerCAmelCase )
# remove ".weight" from the keys
__magic_name__ : Optional[int] = ['.weight', '.bias']
__magic_name__ : int = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__magic_name__ : int = name.replace(lowerCAmelCase , '' )
filtered_module_names.append(lowerCAmelCase )
    return filtered_module_names
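# Example (added sketch): for a typical causal-LM head model this returns something
# like ['lm_head'] plus any modules tied to the input embeddings, which is why
# replace_with_bnb_linear above defaults to skipping 'lm_head'.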
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int:
__magic_name__ : str = parent
__magic_name__ : List[Any] = 13
__magic_name__ : Union[str, Any] = 7
__magic_name__ : Tuple = True
__magic_name__ : Dict = True
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = True
__magic_name__ : int = 99
__magic_name__ : List[str] = 384
__magic_name__ : Optional[int] = 2
__magic_name__ : List[Any] = 4
__magic_name__ : int = 37
__magic_name__ : Union[str, Any] = 'gelu'
__magic_name__ : Optional[int] = 0.1
__magic_name__ : str = 0.1
__magic_name__ : Optional[Any] = 512
__magic_name__ : Any = 16
__magic_name__ : Union[str, Any] = 2
__magic_name__ : Any = 0.02
__magic_name__ : List[str] = 3
__magic_name__ : Tuple = 4
__magic_name__ : List[Any] = 128
__magic_name__ : Optional[Any] = 2
__magic_name__ : List[str] = 9
__magic_name__ : str = 1
__magic_name__ : List[str] = None
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[Any] = None
if self.use_input_mask:
__magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
if self.use_token_type_ids:
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : int , _A : Union[str, Any] ) -> Any:
__magic_name__ : Dict = TFConvBertModel(config=_A )
__magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__magic_name__ : Any = [input_ids, input_mask]
__magic_name__ : Tuple = model(_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]:
__magic_name__ : Dict = TFConvBertForMaskedLM(config=_A )
__magic_name__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple:
__magic_name__ : Any = self.num_labels
__magic_name__ : str = TFConvBertForSequenceClassification(config=_A )
__magic_name__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
__magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
__magic_name__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int:
__magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A )
__magic_name__ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[str] = self.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : str = config_and_inputs
__magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : Any = False
A_ : List[Any] = False
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : Optional[Any] = TFConvBertModelTester(self )
__magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
__magic_name__ : Any = True
if hasattr(_A , 'use_cache' ):
__magic_name__ : List[Any] = True
__magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A )
for model_class in self.all_model_classes:
__magic_name__ : List[str] = self._prepare_for_class(_A , _A )
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Tuple = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
__magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' )
__magic_name__ : Optional[int] = tf.keras.models.load_model(_A )
__magic_name__ : Optional[Any] = model(_A )
if self.is_encoder_decoder:
__magic_name__ : Optional[int] = outputs['encoder_hidden_states']
__magic_name__ : Tuple = outputs['encoder_attentions']
else:
__magic_name__ : Union[str, Any] = outputs['hidden_states']
__magic_name__ : Optional[Any] = outputs['attentions']
self.assertEqual(len(_A ) , _A )
__magic_name__ : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_A )
def __lowerCAmelCase ( self : List[str] ) -> Any:
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = True
__magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A )
__magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A )
def check_decoder_attentions_output(_A : List[Any] ):
__magic_name__ : Tuple = len(_A )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ : Any = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_A : int ):
__magic_name__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = False
__magic_name__ : List[str] = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
__magic_name__ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
__magic_name__ : Any = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[int] = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : str = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : str = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : int ) -> int:
__magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Tuple = model(_A )[0]
__magic_name__ : str = [1, 6, 768]
self.assertEqual(output.shape , _A )
        __magic_name__ : Tuple = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
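# A minimal standalone sketch (assumes network access; not part of the original
# suite) distilling the integration check above: run a short input through the
# public ConvBERT checkpoint and verify the hidden-state shape being asserted.
def _convbert_shape_smoke_check():
    import tensorflow as tf
    from transformers import TFConvBertModel

    model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
    input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])  # batch of 1, six tokens
    output = model(input_ids)[0]
    assert output.shape == (1, 6, 768)  # (batch, sequence, hidden)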
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    """simple docstring"""
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, )
    return config
def rename_key(name):
    """simple docstring"""
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "head.fc" in name:
        name = name.replace('head.fc' , 'classifier.1' )
    if name.startswith('norm' ):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
def prepare_img():
    """simple docstring"""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """simple docstring"""
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if 'head' in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors='pt').pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print('Logits:', logits[0, :3])
    print('Predicted class:', model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model {model_name} and processor to the hub')
        model.push_to_hub(f'ybelkada/{model_name}')
        processor.push_to_hub(f'ybelkada/{model_name}')
if __name__ == "__main__":
lowerCAmelCase :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
lowerCAmelCase :int = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 331 |
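# Usage sketch; the script filename and dump path below are illustrative, not
# taken from the original file:
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-dump
#
# The dump folder can then be reloaded with the standard from_pretrained API:
#
#   from transformers import BitForImageClassification, BitImageProcessor
#   model = BitForImageClassification.from_pretrained('./bit-dump')
#   processor = BitImageProcessor.from_pretrained('./bit-dump')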
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self) -> None:
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {'vecs': i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5, dtype=np.float32) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
        dset.drop_index('vecs' )
def __lowerCAmelCase ( self : Any ) -> str:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Tuple ) -> int:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
    def test_add_elasticsearch_index(self) -> None:
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores, examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
import faiss
__magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__magic_name__ : str = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Optional[int] = 1
__magic_name__ , __magic_name__ : str = index.search(_A )
self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
__magic_name__ , __magic_name__ : str = index.search_batch(_A )
self.assertRaises(_A , index.search_batch , queries[0] )
__magic_name__ : List[Any] = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import faiss
__magic_name__ : str = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__magic_name__ : str = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_A ):
__magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
import faiss
__magic_name__ : Any = faiss.IndexFlat(5 )
__magic_name__ : Optional[Any] = FaissIndex(custom_index=_A )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
import faiss
__magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
index.save(tmp_file.name )
__magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ : Dict = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Tuple = 1
__magic_name__ , __magic_name__ : Optional[Any] = index.search(_A )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    """simple docstring"""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def test_elasticsearch(self) -> None:
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
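# A condensed sketch of the FaissIndex flow the Faiss tests in this module
# exercise (assumes `faiss` is installed; not part of the original module):
# index one-hot vectors and retrieve the row matching a one-hot query.
def _faiss_index_sketch():
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1.0
    scores, indices = index.search(query)
    assert indices[0] == 1  # the query matches the second basis vector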
'''simple docstring'''
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_accelerated_optimizer_pickling(self) -> None:
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'Accelerated optimizer pickling failed with {e}' )
        AcceleratorState._reset_state()
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """simple docstring"""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback(pl.Callback):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int:
__magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
__magic_name__ : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
__magic_name__ : List[Any] = od / 'test_results.txt'
__magic_name__ : Dict = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
__magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , 'a+' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
__magic_name__ : Optional[Any] = metrics[key]
if isinstance(_A , torch.Tensor ):
__magic_name__ : Tuple = val.item()
__magic_name__ : int = F'{key}: {val:.6f}\n'
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
__magic_name__ : str = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_A )
@rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple:
try:
__magic_name__ : str = pl_module.model.model.num_parameters()
except AttributeError:
__magic_name__ : List[str] = pl_module.model.num_parameters()
__magic_name__ : List[Any] = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , 'test' )
@rank_zero_only
def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
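# A hedged wiring sketch (trainer arguments are placeholders, not from the
# original module): the helpers and callback above are typically combined when
# the Lightning trainer is constructed.
#
#   trainer = pl.Trainer(
#       callbacks=[
#           Seq2SeqLoggingCallback(),
#           get_checkpoint_callback(output_dir, metric='rouge2'),
#           get_early_stopping_callback(metric='rouge2', patience=3),
#       ],
#   )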
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """simple docstring"""
    headers = {
        'Authorization': f'token {auth_token}',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'{key}: {value}')
else:
        raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
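# Usage sketch (illustrative, not part of the original script): the helper can
# also be called from other code once a personal access token is available.
#
#   info = fetch_github_info(os.environ['USER_TOKEN'])
#   print(info.get('login'))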
'''simple docstring'''
def one_pence() -> int:
    """simple docstring"""
    return 1


def two_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """simple docstring"""
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
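# The chained recursion above recomputes overlapping subproblems. A hedged
# alternative sketch (not in the original solution): count the same coin
# combinations bottom-up in O(len(coins) * pence) time.
def solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [0] * (pence + 1)
    ways[0] = 1  # one way to make 0p: use no coins
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]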
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x: int) -> int:
    """simple docstring"""
    return x + 2
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] ) -> int:
__magic_name__ : Optional[int] = 'x = 3'
__magic_name__ : str = {}
__magic_name__ : List[Any] = evaluate(_A , {} , state=_A )
assert result == 3
self.assertDictEqual(_A , {'x': 3} )
__magic_name__ : str = 'x = y'
__magic_name__ : int = {'y': 5}
__magic_name__ : Any = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_A , {'x': 5, 'y': 5} )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
__magic_name__ : Union[str, Any] = 'y = add_two(x)'
__magic_name__ : Union[str, Any] = {'x': 3}
__magic_name__ : Tuple = evaluate(_A , {'add_two': add_two} , state=_A )
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
__magic_name__ : Tuple = evaluate(_A , {} , state=_A )
assert result is None
assert "tried to execute add_two" in out.out
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
__magic_name__ : List[Any] = 'x = 3'
__magic_name__ : Dict = {}
__magic_name__ : Optional[int] = evaluate(_A , {} , state=_A )
assert result == 3
self.assertDictEqual(_A , {'x': 3} )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Dict = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
__magic_name__ : Optional[Any] = {'x': 3}
__magic_name__ : List[Any] = evaluate(_A , {'add_two': add_two} , state=_A )
self.assertDictEqual(_A , {'x': 3, 'y': 5} )
self.assertDictEqual(_A , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : Union[str, Any] = 'x = 3\ny = 5'
__magic_name__ : int = {}
__magic_name__ : int = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'y': 5} )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = 'text = f\'This is x: {x}.\''
__magic_name__ : Any = {'x': 3}
__magic_name__ : str = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_A , {'x': 3, 'text': 'This is x: 3.'} )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
__magic_name__ : int = 'if x <= 3:\n y = 2\nelse:\n y = 5'
__magic_name__ : Any = {'x': 3}
__magic_name__ : Tuple = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_A , {'x': 3, 'y': 2} )
__magic_name__ : Any = {'x': 8}
__magic_name__ : Tuple = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_A , {'x': 8, 'y': 5} )
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
__magic_name__ : Union[str, Any] = 'test_list = [x, add_two(x)]'
__magic_name__ : Optional[int] = {'x': 3}
__magic_name__ : int = evaluate(_A , {'add_two': add_two} , state=_A )
self.assertListEqual(_A , [3, 5] )
self.assertDictEqual(_A , {'x': 3, 'test_list': [3, 5]} )
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
__magic_name__ : str = 'y = x'
__magic_name__ : Optional[Any] = {'x': 3}
__magic_name__ : int = evaluate(_A , {} , state=_A )
assert result == 3
self.assertDictEqual(_A , {'x': 3, 'y': 3} )
def __lowerCAmelCase ( self : List[str] ) -> int:
__magic_name__ : List[str] = 'test_list = [x, add_two(x)]\ntest_list[1]'
__magic_name__ : int = {'x': 3}
__magic_name__ : List[Any] = evaluate(_A , {'add_two': add_two} , state=_A )
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'test_list': [3, 5]} )
__magic_name__ : Any = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
__magic_name__ : Optional[Any] = {'x': 3}
__magic_name__ : Dict = evaluate(_A , {'add_two': add_two} , state=_A )
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def __lowerCAmelCase ( self : List[str] ) -> int:
__magic_name__ : Optional[Any] = 'x = 0\nfor i in range(3):\n x = i'
__magic_name__ : List[str] = {}
__magic_name__ : Optional[Any] = evaluate(_A , {'range': range} , state=_A )
assert result == 2
        self.assertDictEqual(_A , {'x': 2, 'i': 2} )
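# A standalone sketch of driving the interpreter directly, mirroring the cases
# above (not part of the original test class):
def _evaluate_sketch():
    state = {'x': 3}
    result = evaluate('y = add_two(x)', {'add_two': add_two}, state=state)
    assert result == 5  # value of the last assignment
    assert state == {'x': 3, 'y': 5}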
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
        requires_backends(cls , ['flax', 'transformers'] )
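# Illustration of the dummy-object pattern above (a sketch; the class name is
# hypothetical): when `flax` and `transformers` are missing, instantiation and
# the guarded classmethods all route through `requires_backends`, which raises
# an ImportError naming the extras to install.
#
#   obj = SomeFlaxPipeline()                       # raises ImportError
#   SomeFlaxPipeline.from_pretrained('some/repo')  # raises ImportError as well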
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : List[Any] , _A : int , _A : Optional[int]=2 , _A : Union[str, Any]=3 , _A : Tuple=4 , _A : int=2 , _A : Optional[Any]=7 , _A : Dict=True , _A : Optional[int]=True , _A : Tuple=True , _A : Union[str, Any]=True , _A : Optional[int]=99 , _A : Union[str, Any]=36 , _A : List[Any]=2 , _A : str=4 , _A : int=37 , _A : Optional[Any]="gelu" , _A : Any=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=512 , _A : Any=16 , _A : List[str]=2 , _A : Tuple=0.02 , _A : List[str]=6 , _A : int=6 , _A : Tuple=3 , _A : Any=4 , _A : List[str]=None , _A : Dict=1000 , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : str = num_channels
__magic_name__ : List[str] = image_size
__magic_name__ : Union[str, Any] = patch_size
__magic_name__ : Optional[Any] = is_training
__magic_name__ : int = use_input_mask
__magic_name__ : Tuple = use_token_type_ids
__magic_name__ : Any = use_labels
__magic_name__ : List[str] = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : int = num_hidden_layers
__magic_name__ : List[str] = num_attention_heads
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : List[str] = hidden_act
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : int = attention_probs_dropout_prob
__magic_name__ : Dict = max_position_embeddings
__magic_name__ : Optional[Any] = type_vocab_size
__magic_name__ : Optional[Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : List[Any] = coordinate_size
__magic_name__ : Optional[int] = shape_size
__magic_name__ : int = num_labels
__magic_name__ : int = num_choices
__magic_name__ : Optional[int] = scope
__magic_name__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__magic_name__ : int = text_seq_length
__magic_name__ : Optional[int] = (image_size // patch_size) ** 2 + 1
__magic_name__ : str = self.text_seq_length + self.image_seq_length
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__magic_name__ : int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
__magic_name__ : Optional[Any] = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__magic_name__ : str = bbox[i, j, 3]
__magic_name__ : Dict = bbox[i, j, 1]
__magic_name__ : List[str] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
__magic_name__ : Union[str, Any] = bbox[i, j, 2]
__magic_name__ : Optional[int] = bbox[i, j, 0]
__magic_name__ : Optional[int] = tmp_coordinate
__magic_name__ : Dict = tf.constant(_A )
__magic_name__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : str = None
if self.use_input_mask:
__magic_name__ : Tuple = random_attention_mask([self.batch_size, self.text_seq_length] )
__magic_name__ : List[str] = None
if self.use_token_type_ids:
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__magic_name__ : Optional[Any] = None
__magic_name__ : Optional[int] = None
if self.use_labels:
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__magic_name__ : List[Any] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Any , _A : Any , _A : List[Any] , _A : Union[str, Any] ) -> str:
__magic_name__ : Dict = TFLayoutLMvaModel(config=_A )
# text + image
__magic_name__ : Union[str, Any] = model(_A , pixel_values=_A , training=_A )
__magic_name__ : Any = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
__magic_name__ : Dict = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__magic_name__ : Union[str, Any] = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__magic_name__ : Optional[Any] = model({'pixel_values': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : Optional[Any] , _A : Dict , _A : str , _A : str , _A : Any , _A : Union[str, Any] , _A : Tuple ) -> List[str]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Tuple = TFLayoutLMvaForSequenceClassification(config=_A )
__magic_name__ : List[str] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Union[str, Any] , _A : Optional[Any] , _A : Optional[int] , _A : int , _A : Dict , _A : Tuple ) -> str:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : List[Any] = TFLayoutLMvaForTokenClassification(config=_A )
__magic_name__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : str , _A : Optional[Any] , _A : Optional[int] , _A : Dict , _A : Optional[Any] , _A : Tuple ) -> Union[str, Any]:
__magic_name__ : List[Any] = 2
__magic_name__ : Optional[int] = TFLayoutLMvaForQuestionAnswering(config=_A )
__magic_name__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
A_ : str = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : Dict = False
A_ : List[str] = False
def __lowerCAmelCase ( self : Any , _A : Dict , _A : str , _A : List[Any] , _A : List[str] , _A : Union[str, Any] ) -> Union[str, Any]:
return True
def __lowerCAmelCase ( self : str , _A : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ) -> dict:
__magic_name__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
__magic_name__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
__magic_name__ : List[str] = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
__magic_name__ : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
__magic_name__ : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
__magic_name__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
__magic_name__ : Union[str, Any] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
__magic_name__ : Union[str, Any] = TFLayoutLMvaModelTester(self )
__magic_name__ : Union[str, Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ , __magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Any = model_class(_A )
if getattr(_A , 'hf_compute_loss' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
__magic_name__ : int = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
__magic_name__ : Dict = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
__magic_name__ : Optional[int] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
__magic_name__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
__magic_name__ : List[Any] = prepared_for_class.pop('input_ids' )
__magic_name__ : Tuple = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
__magic_name__ : List[str] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
__magic_name__ : List[str] = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
__magic_name__ : Optional[Any] = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
__magic_name__ : str = -100
__magic_name__ : List[str] = tf.convert_to_tensor(_A )
__magic_name__ : Tuple = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
__magic_name__ : Optional[Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
__magic_name__ : str = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
__magic_name__ : List[Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
__magic_name__ : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
__magic_name__ : Optional[Any] = inspect.signature(model.call ).parameters
__magic_name__ : Any = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
__magic_name__ : Optional[int] = {0: 'input_ids'}
for label_key in label_keys:
__magic_name__ : Any = signature_names.index(_A )
__magic_name__ : List[str] = label_key
__magic_name__ : List[str] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
__magic_name__ : Optional[int] = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
__magic_name__ : List[str] = prepared_for_class[value]
__magic_name__ : int = tuple(_A )
# Send to model
__magic_name__ : str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
        config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )
@slow
def __lowerCAmelCase ( self : int ) -> List[Any]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : str = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
__magic_name__ : Optional[Any] = self.default_image_processor
__magic_name__ : int = prepare_img()
__magic_name__ : List[str] = image_processor(images=_A , return_tensors='tf' ).pixel_values
__magic_name__ : int = tf.constant([[1, 2]] )
__magic_name__ : str = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
__magic_name__ : Optional[int] = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
__magic_name__ : List[str] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
__magic_name__ : Dict = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1E-4 ) )
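# A condensed sketch of the integration check above (assumes network access;
# not part of the original suite): text-only input with bounding boxes.
def _layoutlmv3_smoke_check():
    model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')
    input_ids = tf.constant([[1, 2]])
    bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)
    outputs = model(input_ids=input_ids, bbox=bbox)
    assert outputs.last_hidden_state.shape[-1] == 768  # base checkpoint hidden size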
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any:
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]:
__magic_name__ : Union[str, Any] = {}
__magic_name__ : Optional[Any] = {}
if prompt is not None:
__magic_name__ : Union[str, Any] = prompt
if generate_kwargs is not None:
__magic_name__ : str = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__magic_name__ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__magic_name__ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
__magic_name__ : List[Any] = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__magic_name__ : Any = self.model.config.model_type
if model_type == "git":
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids
__magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__magic_name__ : int = None
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__magic_name__ : str = None
if generate_kwargs is None:
__magic_name__ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name )
__magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A )
return model_outputs
def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = []
for output_ids in model_outputs:
__magic_name__ : Union[str, Any] = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
        return records
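# Usage sketch for the pipeline above (the image URL is the COCO sample used
# elsewhere in this repo; the generated text depends on the checkpoint):
#
#   from transformers import pipeline
#   captioner = pipeline('image-to-text')
#   result = captioner('http://images.cocodataset.org/val2017/000000039769.jpg')
#   # `result` is a list of {'generated_text': ...} records, one per image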
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _lowerCamelCase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = StableDiffusionControlNetImgaImgPipeline
A_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
A_ : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
A_ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
__magic_name__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
__magic_name__ : Dict = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
        __magic_name__ : Optional[int] = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__magic_name__ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__magic_name__ : Any = CLIPTextModel(_A )
__magic_name__ : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__magic_name__ : List[str] = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> Tuple:
if str(_A ).startswith('mps' ):
__magic_name__ : int = torch.manual_seed(_A )
else:
__magic_name__ : Optional[int] = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[Any] = 2
__magic_name__ : int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , )
__magic_name__ : Optional[Any] = floats_tensor(control_image.shape , rng=random.Random(_A ) ).to(_A )
__magic_name__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __magic_name__ : int = Image.fromarray(np.uint8(_A ) ).convert('RGB' ).resize((64, 64) )
__magic_name__ : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
    A_ : Optional[int] = StableDiffusionControlNetImg2ImgPipeline
A_ : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
A_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    A_ : Dict = frozenset([] ) # TODO: add image_params once refactored VaeImageProcessor.preprocess
    def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        torch.manual_seed(0 )

        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            'unet': unet,
            'controlnet': controlnet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
def __lowerCAmelCase ( self : int , _A : Optional[int] , _A : List[Any]=0 ) -> str:
if str(_A ).startswith('mps' ):
__magic_name__ : List[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Dict = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : int = 2
__magic_name__ : List[Any] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
]
__magic_name__ : int = floats_tensor(control_image[0].shape , rng=random.Random(_A ) ).to(_A )
__magic_name__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __magic_name__ : Tuple = Image.fromarray(np.uint8(_A ) ).convert('RGB' ).resize((64, 64) )
__magic_name__ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
    def __lowerCAmelCase ( self : Any ) -> str:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_1 = pipe(**inputs )[0]

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]

        inputs = self.get_dummy_inputs(torch_device )
        inputs['num_inference_steps'] = steps
        inputs['guidance_scale'] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_3 ) ) > 1E-3
        assert np.sum(np.abs(output_1 - output_4 ) ) > 1E-3
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : Optional[int] = self.get_dummy_components()
__magic_name__ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
__magic_name__ : Any = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
        __magic_name__ : Optional[Any] = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=_A , controlnet=_A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
__magic_name__ : Any = torch.Generator(device='cpu' ).manual_seed(0 )
__magic_name__ : Dict = 'evil space-punk bird'
__magic_name__ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
__magic_name__ : List[Any] = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
__magic_name__ : Union[str, Any] = pipe(
_A , _A , control_image=_A , generator=_A , output_type='np' , num_inference_steps=50 , strength=0.6 , )
__magic_name__ : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
__magic_name__ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9E-2
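# Editor's sketch (not part of the original test file): how a Canny condition
# image like `bird_canny.png` above is typically produced from the source photo
# before being passed as `control_image`. Assumes `opencv-python` is installed;
# the thresholds are illustrative, not the values used for the reference asset.
def make_canny_condition(image, low: int = 100, high: int = 200):
    import cv2

    edges = cv2.Canny(np.array(image), low, high)  # single-channel edge map
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)  # replicate to 3 channels
    return Image.fromarray(edges)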
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase :Dict = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCAmelCase :str = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
    '''summarization''': AutoModelForSeq2SeqLM,
    '''translation''': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase :Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys())
lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
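# Editor's sketch (not part of the original module): how one entry of the
# `arg_to_scheduler` mapping above is resolved at runtime, i.e. AdamW plus a
# warmup-then-linear-decay schedule, stepped once per optimizer update.
# The hyperparameters are illustrative.
def _sketch_linear_warmup_schedule():
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = AdamW(params, lr=5e-5, eps=1e-8)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=100, num_training_steps=1000)
    for _ in range(3):
        optimizer.step()  # no gradients here, so effectively a no-op, but keeps the call order realistic
        scheduler.step()
    return scheduler.get_last_lr()  # learning rate still ramping up during warmup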
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
    if args.fp16:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
    return trainer
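# Editor's sketch (not part of the original module): the weight-decay grouping
# used in `configure_optimizers` above, shown standalone. Biases and LayerNorm
# weights are excluded from decay by substring match on parameter names.
def _sketch_weight_decay_grouping():
    import torch

    class Tiny(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)
            self.LayerNorm = torch.nn.LayerNorm(4)  # HF models name this submodule `LayerNorm`

    model = Tiny()
    no_decay = ['bias', 'LayerNorm.weight']
    grouped_parameters = [
        {
            'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            'weight_decay': 0.01,
        },
        {
            'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            'weight_decay': 0.0,
        },
    ]
    return AdamW(grouped_parameters, lr=5e-5, eps=1e-8)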
'''simple docstring'''
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    """Count the ways to make `n` pence from standard UK coins (Project Euler 31)."""
    return two_pound(n)
if __name__ == "__main__":
    print(solution(int(input().strip())))
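# Editor's cross-check (not part of the original solution): the same count via a
# standard bottom-up coin-partition DP; Project Euler 31's published answer for
# 200 pence is 73682.
def ways_to_make(total: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * total  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, total + 1):
            ways[amount] += ways[amount - coin]
    return ways[total]


assert ways_to_make(200) == 73682 == solution(200)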
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
        config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=_A )
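# Editor's sketch (not part of the original tests): the closed form behind the
# `_get_variance` assertions above. For the default linear beta schedule,
# DDPM's "fixed_small" variance is beta_tilde_t = (1 - abar_{t-1}) / (1 - abar_t) * beta_t.
def _sketch_fixed_small_variance(t: int) -> float:
    import numpy as np

    betas = np.linspace(0.0001, 0.02, 1000)
    alphas_bar = np.cumprod(1.0 - betas)
    prev = alphas_bar[t - 1] if t > 0 else 1.0
    return float((1.0 - prev) / (1.0 - alphas_bar[t]) * betas[t])


# _sketch_fixed_small_variance(0), (487) and (999) come out near 0.0, 0.00979 and
# 0.02, matching the values asserted in the test above.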
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments followed by the target script's arguments."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch '
            'helper utility that will spawn up '
            'multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )

    # Patch sys.argv so the spawned script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    main()
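# Example invocation (editor's addition):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# Everything after the script path is forwarded to it untouched; the launched
# script must expose an `_mp_fn(index)` entry point for `xmp.spawn` to call on
# each TPU core.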
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = IFInpaintingPipeline
A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
if str(_A ).startswith('mps' ):
__magic_name__ : Optional[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1) for Newton's forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input('enter the number of values: '))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0.0)
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
    main()
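# Editor's worked example (not part of the original script): the same formula on
# f(x) = x^2 sampled at x = 0..3. A quadratic is reproduced exactly by its
# forward differences, so the interpolated value at 1.5 must be 2.25.
def _sketch_newton_forward(xs: list[int], ys: list[float], value: float) -> float:
    n = len(xs)
    table = [ys[:]]  # row i holds the i-th forward differences of ys
    for i in range(1, n):
        table.append([table[i - 1][j + 1] - table[i - 1][j] for j in range(n - i)])
    u = (value - xs[0]) / (xs[1] - xs[0])
    total = table[0][0]
    for i in range(1, n):
        total += ucal(u, i) * table[i][0] / math.factorial(i)
    return total


assert abs(_sketch_newton_forward([0, 1, 2, 3], [0.0, 1.0, 4.0, 9.0], 1.5) - 2.25) < 1e-9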
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
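# Editor's note (not part of the original tests): the hard-coded `input_ids` and
# attention mask in the slow test above are what a tokenizer would emit; in
# normal use they come from AutoTokenizer, e.g.:
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('microsoft/deberta-base')
#   encoded = tokenizer('Hello world', return_tensors='pt')
#   outputs = model(encoded['input_ids'], attention_mask=encoded['attention_mask'])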
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Any , _A : Tuple , _A : Union[str, Any]=3 , _A : Tuple=32 , _A : Any=3 , _A : Optional[int]=10 , _A : Dict=[10, 20, 30, 40] , _A : Optional[Any]=[1, 1, 2, 1] , _A : List[Any]=True , _A : str=True , _A : Any="relu" , _A : Optional[Any]=3 , _A : Dict=None , ) -> List[str]:
__magic_name__ : str = parent
__magic_name__ : Optional[Any] = batch_size
__magic_name__ : Dict = image_size
__magic_name__ : int = num_channels
__magic_name__ : Tuple = embeddings_size
__magic_name__ : int = hidden_sizes
__magic_name__ : Optional[Any] = depths
__magic_name__ : Dict = is_training
__magic_name__ : Tuple = use_labels
__magic_name__ : Optional[int] = hidden_act
__magic_name__ : Any = num_labels
__magic_name__ : Union[str, Any] = scope
__magic_name__ : Union[str, Any] = len(_A )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
__magic_name__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : List[Any] = None
if self.use_labels:
__magic_name__ : Any = ids_tensor([self.batch_size] , self.num_labels )
__magic_name__ : str = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __lowerCAmelCase ( self : Optional[int] , _A : int , _A : Tuple , _A : Tuple ) -> Tuple:
__magic_name__ : Any = TFResNetModel(config=_A )
__magic_name__ : List[str] = model(_A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self : Dict , _A : Any , _A : Optional[int] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Any = self.num_labels
__magic_name__ : int = TFResNetForImageClassification(_A )
__magic_name__ : Optional[int] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : Any ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A_ : int = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
A_ : List[str] = False
A_ : str = False
A_ : List[Any] = False
A_ : Any = False
A_ : List[Any] = False
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = TFResNetModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=_A , has_text_modality=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : Any ) -> Tuple:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCAmelCase ( self : int ) -> Optional[int]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Tuple = model_class(_A )
__magic_name__ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Any = [*signature.parameters.keys()]
__magic_name__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
def check_hidden_states_output(_A : List[Any] , _A : Any , _A : Dict ):
__magic_name__ : Any = model_class(_A )
__magic_name__ : Optional[int] = model(**self._prepare_for_class(_A , _A ) )
__magic_name__ : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_A ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Union[str, Any] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__magic_name__ : List[str] = layer_type
__magic_name__ : Dict = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : str = True
check_hidden_states_output(_A , _A , _A )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Any:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : List[str] = TFResNetModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : Any = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__magic_name__ : Tuple = self.default_image_processor
__magic_name__ : Tuple = prepare_img()
__magic_name__ : Any = image_processor(images=_A , return_tensors='tf' )
# forward pass
__magic_name__ : Union[str, Any] = model(**_A )
# verify the logits
__magic_name__ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
__magic_name__ : Any = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1E-4 ) )
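# Editor's sketch (not part of the original tests): end-to-end inference with the
# same public checkpoint the tests reference, mapping logits to a human-readable
# label via the config's `id2label` table.
def _sketch_resnet_label(image_path: str = './tests/fixtures/tests_samples/COCO/000000039769.png') -> str:
    model = TFResNetForImageClassification.from_pretrained('microsoft/resnet-50')
    processor = AutoImageProcessor.from_pretrained('microsoft/resnet-50')
    inputs = processor(images=Image.open(image_path), return_tensors='tf')
    logits = model(**inputs).logits
    return model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])]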
'''simple docstring'''
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
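# Editor's usage example (not part of the original module): the classic 5x5 grid
# below has five islands under 8-directional connectivity.
if __name__ == "__main__":
    example_grid = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    assert Matrix(5, 5, example_grid).count_islands() == 5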
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
lowerCAmelCase :Any = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] , _A : str , _A : bool , _A : str = None , _A : list = None ) -> Dict:
__magic_name__ : List[str] = None
__magic_name__ : Tuple = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
__magic_name__ : List[str] = os.path.abspath('examples' )
for item in os.listdir(_A ):
if item not in EXCLUDE_EXAMPLES:
__magic_name__ : Any = os.path.join(_A , _A )
if os.path.isfile(_A ) and ".py" in item_path:
with self.subTest(
tested_script=_A , feature_script=_A , tested_section='main()' if parser_only else 'training_function()' , ):
__magic_name__ : Optional[Any] = compare_against_test(
os.path.join(_A , _A ) , _A , _A , _A )
__magic_name__ : str = '\n'.join(_A )
if special_strings is not None:
for string in special_strings:
__magic_name__ : Any = diff.replace(_A , '' )
self.assertEqual(_A , '' )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
self.one_complete_example('complete_nlp_example.py' , _A )
self.one_complete_example('complete_nlp_example.py' , _A )
def __lowerCAmelCase ( self : int ) -> List[Any]:
__magic_name__ : List[Any] = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
__magic_name__ : str = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
self.one_complete_example('complete_cv_example.py' , _A , _A , _A )
self.one_complete_example('complete_cv_example.py' , _A , _A , _A )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = False
@classmethod
def __lowerCAmelCase ( cls : Optional[int] ) -> List[str]:
super().setUpClass()
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : List[Any] = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
__magic_name__ : Tuple = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def __lowerCAmelCase ( cls : int ) -> str:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __lowerCAmelCase ( self : Dict ) -> int:
__magic_name__ : int = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
__magic_name__ : Any = F'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
__magic_name__ : str = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def __lowerCAmelCase ( self : str ) -> Tuple:
__magic_name__ : Union[str, Any] = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
__magic_name__ : List[Any] = run_command(self._launch_args + testargs , return_stdout=_A )
self.assertNotIn('epoch 0:' , _A )
self.assertIn('epoch 1:' , _A )
def __lowerCAmelCase ( self : Dict ) -> Dict:
__magic_name__ : List[str] = F'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
__magic_name__ : Union[str, Any] = run_command(self._launch_args + testargs , return_stdout=_A )
if torch.cuda.is_available():
__magic_name__ : Optional[int] = torch.cuda.device_count()
else:
__magic_name__ : Any = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , _A )
self.assertIn('epoch 1:' , _A )
else:
self.assertIn('epoch 0:' , _A )
self.assertIn('epoch 1:' , _A )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
__magic_name__ : str = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
__magic_name__ : List[str] = run_command(self._launch_args + testargs , return_stdout=_A )
__magic_name__ : List[str] = re.findall('({.+})' , _A )
__magic_name__ : Union[str, Any] = [r for r in results if 'accuracy' in r][-1]
__magic_name__ : List[str] = ast.literal_eval(_A )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
__magic_name__ : Optional[int] = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
with tempfile.TemporaryDirectory() as tmpdir:
__magic_name__ : Dict = F'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_A , 'tracking' ) ) )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : int = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def __lowerCAmelCase ( self : Dict ) -> Dict:
__magic_name__ : List[Any] = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs ) | 331 |
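# For reference, each test above shells out to a command of this shape
# (config path and flag values are illustrative):
#
#   accelerate launch --config_file default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir ./ckpts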
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the characters that match the target position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap the tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate one random gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return ''.join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate new children for the population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one matches the target; return generation stats."""
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside the genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append(''.join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population: {total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}'
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping these avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is the selection step.
        for i in range(N_SELECTED):
            population.extend(select(population_score[i], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
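# Quick illustrative check of evaluate() above (inputs are arbitrary):
#   evaluate("abcd", "abed") -> ("abcd", 3.0), since three positions match.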
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Find the area of the grid whose rectangle count is closest to the target."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 1 |
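# Sanity check of the identity behind solution(): an a x b grid contains
# T(a) * T(b) rectangles, where T(k) = k * (k + 1) / 2. A 3 x 2 grid therefore
# holds 6 * 3 = 18 rectangles, so a target of exactly 18 yields area 3 * 2 = 6.
assert solution(18) == 6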
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowerCAmelCase :Optional[int] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : tuple , lowerCAmelCase : Path , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[str]=False , ):
"""simple docstring"""
output_path.parent.mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCAmelCase , lowerCAmelCase , f=output_path.as_posix() , input_names=lowerCAmelCase , output_names=lowerCAmelCase , dynamic_axes=lowerCAmelCase , do_constant_folding=lowerCAmelCase , use_external_data_format=lowerCAmelCase , enable_onnx_checker=lowerCAmelCase , opset_version=lowerCAmelCase , )
else:
export(
lowerCAmelCase , lowerCAmelCase , f=output_path.as_posix() , input_names=lowerCAmelCase , output_names=lowerCAmelCase , dynamic_axes=lowerCAmelCase , do_constant_folding=lowerCAmelCase , opset_version=lowerCAmelCase , )
@torch.no_grad()
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : int , lowerCAmelCase : bool = False ):
"""simple docstring"""
__magic_name__ : Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
__magic_name__ : List[str] = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
__magic_name__ : Dict = 'cpu'
__magic_name__ : int = Path(lowerCAmelCase )
# VAE DECODER
__magic_name__ : int = AutoencoderKL.from_pretrained(model_path + '/vae' )
__magic_name__ : str = vae_decoder.config.latent_channels
# forward only through the decoder part
__magic_name__ : Dict = vae_decoder.decode
onnx_export(
lowerCAmelCase , model_args=(
torch.randn(1 , lowerCAmelCase , 25 , 25 ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=lowerCAmelCase , )
del vae_decoder
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
lowerCAmelCase :Union[str, Any] = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''') | 331 |
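# Example invocation (the script name and paths are placeholders):
#   python convert_vae_decoder_to_onnx.py \
#       --model_path ./stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14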
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 331 | 1 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase :Any = logging.get_logger(__name__)
lowerCAmelCase :Tuple = '''▁'''
lowerCAmelCase :str = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
lowerCAmelCase :Tuple = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
lowerCAmelCase :Optional[Any] = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
lowerCAmelCase :List[Any] = {
'''ernie-m-base''': 5_1_4,
'''ernie-m-large''': 5_1_4,
}
lowerCAmelCase :Dict = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = ["input_ids"]
A_ : Any = VOCAB_FILES_NAMES
A_ : str = PRETRAINED_INIT_CONFIGURATION
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , _A : str , _A : Any=None , _A : int=False , _A : Union[str, Any]="utf8" , _A : Optional[int]="[UNK]" , _A : List[Any]="[SEP]" , _A : List[str]="[PAD]" , _A : Dict="[CLS]" , _A : Optional[int]="[MASK]" , _A : Optional[Dict[str, Any]] = None , **_A : Tuple , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
__magic_name__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , vocab_file=_A , encoding=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__magic_name__ : int = do_lower_case
__magic_name__ : Dict = sentencepiece_model_ckpt
__magic_name__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__magic_name__ : Optional[int] = self.load_vocab(filepath=_A )
else:
__magic_name__ : List[Any] = {self.sp_model.id_to_piece(_A ): id for id in range(self.sp_model.get_piece_size() )}
__magic_name__ : int = {v: k for k, v in self.vocab.items()}
def __lowerCAmelCase ( self : List[Any] , _A : Optional[int] ) -> Dict:
if text is None:
return None
__magic_name__ : Union[str, Any] = self.tokenize(_A )
__magic_name__ , __magic_name__ : Any = '', []
for i, ch in enumerate(_A ):
if ch in self.SP_CHAR_MAPPING:
__magic_name__ : int = self.SP_CHAR_MAPPING.get(_A )
else:
__magic_name__ : Dict = unicodedata.normalize('NFKC' , _A )
if self.is_whitespace(_A ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_A ) )
__magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
__magic_name__ : Dict = text.lower()
for token in split_tokens:
if token[:1] == "▁":
__magic_name__ : int = token[1:]
__magic_name__ : List[Any] = text[offset:].index(_A ) + offset
__magic_name__ : Tuple = start + len(_A )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
__magic_name__ : Dict = end
return token_mapping
@property
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
return len(self.vocab )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Tuple ) -> Dict:
__magic_name__ : Tuple = self.__dict__.copy()
__magic_name__ : Optional[Any] = None
return state
def __setstate__( self : Optional[Any] , _A : Any ) -> List[str]:
__magic_name__ : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__magic_name__ : Dict = {}
__magic_name__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __lowerCAmelCase ( self : Optional[Any] , _A : List[Any] ) -> List[Any]:
return "".join((self.SP_CHAR_MAPPING.get(_A , _A ) for c in text) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : List[Any]=False , _A : Optional[int]=64 , _A : Tuple=0.1 ) -> Dict:
if self.sp_model_kwargs.get('enable_sampling' ) is True:
__magic_name__ : int = True
if self.sp_model_kwargs.get('alpha' ) is not None:
__magic_name__ : Union[str, Any] = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
__magic_name__ : List[str] = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
__magic_name__ : List[Any] = self.sp_model.EncodeAsPieces(_A )
else:
__magic_name__ : Any = self.sp_model.SampleEncodeAsPieces(_A , _A , _A )
__magic_name__ : Tuple = []
for pi, piece in enumerate(_A ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_A ) and pi != 0:
new_pieces.append(_A )
continue
else:
continue
__magic_name__ : str = 0
for i, chunk in enumerate(_A ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_A ) or self.is_punct(_A ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_A )
__magic_name__ : Dict = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__magic_name__ : Union[str, Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
__magic_name__ : List[Any] = i
if len(_A ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __lowerCAmelCase ( self : List[Any] , _A : List[str] ) -> Any:
__magic_name__ : Any = ''.join(_A ).replace(_A , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : int , _A : List[str] ) -> Tuple:
__magic_name__ : Optional[Any] = self.convert_ids_to_tokens(_A )
__magic_name__ : List[str] = ''.join(_A ).replace(_A , ' ' ).strip()
return out_string
def __lowerCAmelCase ( self : str , _A : Union[str, Any] ) -> Dict:
return self.vocab.get(_A , self.vocab.get(self.unk_token ) )
def __lowerCAmelCase ( self : str , _A : str ) -> Optional[Any]:
return self.reverse_vocab.get(_A , self.unk_token )
def __lowerCAmelCase ( self : Tuple , _A : Optional[int] , _A : List[str]=None ) -> List[str]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : Optional[Any] = [self.cls_token_id]
__magic_name__ : Optional[int] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __lowerCAmelCase ( self : Optional[int] , _A : int , _A : str=None ) -> int:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __lowerCAmelCase ( self : List[Any] , _A : Dict , _A : Union[str, Any]=None , _A : int=False ) -> Dict:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1]
def __lowerCAmelCase ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_A ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_A ) + 1) + [1] * (len(_A ) + 3)
def __lowerCAmelCase ( self : str , _A : Tuple ) -> List[Any]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> List[str]:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __lowerCAmelCase ( self : Dict , _A : str ) -> Union[str, Any]:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __lowerCAmelCase ( self : Any , _A : Any ) -> Tuple:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_A ) == 1:
__magic_name__ : List[str] = unicodedata.category(_A )
if cat == "Zs":
return True
return False
def __lowerCAmelCase ( self : int , _A : int ) -> List[str]:
__magic_name__ : Dict = {}
with io.open(_A , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(_A ):
__magic_name__ : List[str] = line.rstrip('\n' )
__magic_name__ : Dict = int(_A )
return token_to_idx
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
__magic_name__ : Optional[Any] = 0
if os.path.isdir(_A ):
__magic_name__ : Optional[int] = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__magic_name__ : Union[str, Any] = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(_A , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
' Please check that the vocabulary is not corrupted!' )
__magic_name__ : Optional[Any] = token_index
writer.write(token + '\n' )
index += 1
__magic_name__ : Dict = os.path.join(_A , 'sentencepiece.bpe.model' )
with open(_A , 'wb' ) as fi:
__magic_name__ : List[str] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (vocab_file,) | 331 |
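# For reference, load_vocab() above expects one token per line and assigns each
# token its zero-based line number as the id. A minimal (made-up) vocab.txt:
#
#   [PAD]
#   [CLS]
#   [SEP]
#   [UNK]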
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None:
super().__init__(**_A )
__magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384}
__magic_name__ : Dict = get_size_dict(_A , default_to_square=_A )
__magic_name__ : List[Any] = do_resize
__magic_name__ : str = size
# Default value set here for backwards compatibility where the value in config is None
__magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
__magic_name__ : int = resample
__magic_name__ : List[str] = do_rescale
__magic_name__ : List[Any] = rescale_factor
__magic_name__ : str = do_normalize
__magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
__magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__magic_name__ : Dict = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__magic_name__ : Dict = int(shortest_edge / crop_pct )
__magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
__magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int:
return rescale(_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image:
__magic_name__ : int = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__magic_name__ : Optional[Any] = resample if resample is not None else self.resample
__magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : str = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Dict = image_std if image_std is not None else self.image_std
__magic_name__ : Dict = size if size is not None else self.size
__magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A )
__magic_name__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
__magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images]
if do_rescale:
__magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
__magic_name__ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A ) | 331 | 1 |
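# Worked example of the crop_pct path in the resize step above (numbers are
# illustrative): with size = {"shortest_edge": 224} and crop_pct = 224 / 256,
# the shortest edge is first resized to int(224 / (224 / 256)) = 256 pixels and
# the result is center-cropped to 224 x 224. At shortest_edge >= 384 the image
# is instead warped straight to (shortest_edge, shortest_edge) with no crop.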
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Any = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' , type=lowerCAmelCase , default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' , type=lowerCAmelCase , default=5 )
parser.add_argument('--batch_size' , type=lowerCAmelCase , default=6 )
parser.add_argument('--gradient_accumulation_steps' , type=lowerCAmelCase , default=1 )
parser.add_argument('--freeze' , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument('--learning_rate' , type=lowerCAmelCase , default=5e-4 )
parser.add_argument('--seed' , type=lowerCAmelCase , default=0 )
parser.add_argument('--lr_scheduler_type' , type=lowerCAmelCase , default='cosine' )
parser.add_argument('--num_warmup_steps' , type=lowerCAmelCase , default=10 )
parser.add_argument('--weight_decay' , type=lowerCAmelCase , default=0.01 )
parser.add_argument('--output_dir' , type=lowerCAmelCase , default='./results' )
return parser.parse_args()
lowerCAmelCase :str = load('''accuracy''')
def lowerCamelCase ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
__magic_name__ , __magic_name__ : List[Any] = eval_pred
__magic_name__ : List[Any] = np.argmax(lowerCAmelCase , axis=1 )
return metric.compute(predictions=lowerCAmelCase , references=lowerCAmelCase )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Tuple , _A : Tuple ) -> None:
super().__init__()
__magic_name__ : List[Any] = trainer
def __lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Dict , _A : Any , **_A : List[Any] ) -> int:
if control.should_evaluate:
__magic_name__ : Optional[int] = deepcopy(_A )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
return control_copy
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : str = get_args()
set_seed(args.seed )
__magic_name__ : str = load_dataset('codeparrot/codecomplex' , split='train' )
__magic_name__ : Tuple = dataset.train_test_split(test_size=0.2 )
__magic_name__ : str = train_test['test'].train_test_split(test_size=0.5 )
__magic_name__ : List[str] = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
__magic_name__ : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
__magic_name__ : Dict = tokenizer.eos_token
__magic_name__ : int = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
__magic_name__ : List[Any] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
__magic_name__ : Any = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )

    def tokenize(example: dict):
        inputs = tokenizer(example['src'] , truncation=True , max_length=1024 )
        label = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
__magic_name__ : Optional[Any] = train_test_validation.map(
lowerCAmelCase , batched=lowerCAmelCase , remove_columns=train_test_validation['train'].column_names , )
__magic_name__ : Optional[Any] = DataCollatorWithPadding(tokenizer=lowerCAmelCase )
__magic_name__ : int = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
__magic_name__ : Union[str, Any] = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , compute_metrics=lowerCAmelCase , )
print('Training...' )
trainer.add_callback(CustomCallback(lowerCAmelCase ) )
trainer.train()
if __name__ == "__main__":
main() | 331 |
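# Example invocation (the script name and values are illustrative):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --output_dir ./results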
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c,
# both used in the Casimir force formula below.
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve the Casimir equation for whichever of force, area or distance is 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if force < 0:
        raise ValueError('Magnitude of force can not be negative')
    if distance < 0:
        raise ValueError('Distance can not be negative')
    if area < 0:
        raise ValueError('Area can not be negative')
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0')
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 | 1 |
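    # Worked example (numbers illustrative): two plates of area 4 m^2 separated
    # by 0.05 m feel an attractive Casimir force of roughly 8.3e-22 N.
    print(casimir_force(force=0, area=4, distance=0.05))  # {'force': ~8.33e-22}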
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    """Check whether the adjacency-list graph is bipartite using DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 331 |
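# A non-bipartite counter-example (an odd cycle): the triangle below assigns the
# same color to two adjacent vertices, so the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False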
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase :Tuple = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase :List[Any] = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase :Optional[Any] = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase :Union[str, Any] = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase :Tuple = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) )
__magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
__magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowerCamelCase ( lowerCAmelCase : int = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : Any = PokerHand(lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS]
__magic_name__ : Tuple = poker_hands.copy()
shuffle(lowerCAmelCase )
__magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) )
for index, hand in enumerate(lowerCAmelCase ):
assert hand == poker_hands[index]
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' )
__magic_name__ : Optional[Any] = True
__magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = 0
__magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) )
__magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' )
with open(lowerCAmelCase ) as file_hand:
for line in file_hand:
__magic_name__ : Optional[int] = line[:14].strip()
__magic_name__ : List[Any] = line[15:].strip()
__magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase )
__magic_name__ : List[Any] = player.compare_with(lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 376 | 331 | 1 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
__magic_name__ : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('google/mt5-small' )
__magic_name__ : Tuple = AutoTokenizer.from_pretrained('google/mt5-small' )
__magic_name__ : int = tokenizer('Hello there' , return_tensors='np' ).input_ids
__magic_name__ : List[Any] = tokenizer('Hi I am' , return_tensors='np' ).input_ids
__magic_name__ : Any = shift_tokens_right(_A , model.config.pad_token_id , model.config.decoder_start_token_id )
__magic_name__ : Optional[Any] = model(_A , decoder_input_ids=_A ).logits
__magic_name__ : Dict = optax.softmax_cross_entropy(_A , onehot(_A , logits.shape[-1] ) ).mean()
__magic_name__ : Dict = -(labels.shape[-1] * loss.item())
__magic_name__ : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 ) | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted halves input_list[low:mid] and input_list[mid:high + 1]."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input list using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted)) | 331 |
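    # For example: iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) -> [1, 2, 5, 7, 7, 8, 9]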
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]:
__magic_name__ : str = {}
if "candidate_labels" in kwargs:
__magic_name__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__magic_name__ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int:
__magic_name__ : Dict = load_image(_A )
__magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
__magic_name__ : Optional[Any] = candidate_labels
__magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels]
__magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__magic_name__ : Optional[Any] = [text_inputs]
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str:
__magic_name__ : str = model_inputs.pop('candidate_labels' )
__magic_name__ : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__magic_name__ : Dict = text_inputs[0]
else:
# Batching case.
__magic_name__ : Optional[Any] = text_inputs[0][0]
__magic_name__ : List[Any] = self.model(**_A , **_A )
__magic_name__ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]:
__magic_name__ : Tuple = model_outputs.pop('candidate_labels' )
__magic_name__ : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
__magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
__magic_name__ : Tuple = probs.tolist()
if not isinstance(_A , _A ):
__magic_name__ : Any = [scores]
elif self.framework == "tf":
__magic_name__ : Any = stable_softmax(_A , axis=-1 )
__magic_name__ : Dict = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Union[str, Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -x[0] )
]
return result | 331 | 1 |
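# A hedged usage sketch for the zero-shot image-classification pipeline above.
# The checkpoint and image URL are illustrative choices, not mandated by the
# snippet; any CLIP-style checkpoint with an image processor should work.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cats", "dogs"],
    hypothesis_template="This is a photo of {}.",  # the default shown in the preprocess step
)
print(result)  # e.g. [{'score': 0.99, 'label': 'cats'}, {'score': 0.01, 'label': 'dogs'}]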
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCAmelCase : list[float] ):
"""simple docstring"""
__magic_name__ : Tuple = 0.00
__magic_name__ : int = 0
for resistor in resistors:
if resistor <= 0:
__magic_name__ : Tuple = f'Resistor at index {index} has a negative or zero value!'
raise ValueError(lowerCAmelCase )
first_sum += 1 / float(lowerCAmelCase )
index += 1
return 1 / first_sum
def lowerCamelCase ( lowerCAmelCase : list[float] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = 0.00
__magic_name__ : Any = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__magic_name__ : str = f'Resistor at index {index} has a negative value!'
raise ValueError(lowerCAmelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 |
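# A readable reconstruction of the two resistance helpers above; names are
# inferred from usage (the placeholder assignments hold the error message and
# the running totals).
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Return 1 / (1/R1 + 1/R2 + ... + 1/Rn); every value must be positive."""
    first_sum = 0.0
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            raise ValueError(f"Resistor at index {index} has a negative or zero value!")
        first_sum += 1 / resistor
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Return R1 + R2 + ... + Rn; zero is allowed, negative values are not."""
    sum_r = 0.0
    for index, resistor in enumerate(resistors):
        if resistor < 0:
            raise ValueError(f"Resistor at index {index} has a negative value!")
        sum_r += resistor
    return sum_r


assert resistor_parallel([2.0, 2.0]) == 1.0
assert resistor_series([1.0, 2.0, 3.0]) == 6.0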
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase :int = '''pt'''
elif is_tf_available():
lowerCAmelCase :Optional[Any] = '''tf'''
else:
lowerCAmelCase :Optional[Any] = '''jax'''
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = ByTaTokenizer
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
super().setUp()
__magic_name__ : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__magic_name__ : Optional[Any] = []
for i in range(len(_A ) ):
try:
__magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) )
__magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
__magic_name__ : Optional[int] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
__magic_name__ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__magic_name__ : List[str] = [t[0] for t in toks]
# Ensure consistency
__magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
__magic_name__ : int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
__magic_name__ : Union[str, Any] = ' ' + output_txt
__magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def __lowerCAmelCase ( self : int ) -> str:
__magic_name__ : Any = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : Optional[int] = self.ta_base_tokenizer
__magic_name__ : Optional[int] = 'Unicode €.'
__magic_name__ : Optional[Any] = tokenizer(_A )
__magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : Any = tokenizer.decode(_A )
self.assertEqual(_A , 'Unicode €.</s>' )
__magic_name__ : Any = tokenizer('e è é ê ë' )
__magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : List[str] = tokenizer.decode(_A )
self.assertEqual(_A , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : List[Any] = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
__magic_name__ : str = list(batch.input_ids.numpy()[0] )
else:
__magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('decoder_input_ids' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Union[str, Any] = self.ta_base_tokenizer
__magic_name__ : Tuple = [
'Summary of the text.',
'Another summary.',
]
__magic_name__ : Dict = tokenizer(
text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : Any = ['A long paragraph for summarization. </s>']
__magic_name__ : List[str] = ['Summary of the text. </s>']
# fmt: off
__magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__magic_name__ : str = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['input_ids'][0] )
self.assertEqual(_A , batch['labels'][0] )
def __lowerCAmelCase ( self : Any ) -> str:
# safety check on max_len default value so we are sure the test works
__magic_name__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
__magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Union[str, Any] = json.load(_A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Optional[Any] = json.load(_A )
__magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )]
__magic_name__ : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
__magic_name__ : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : str = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )]
__magic_name__ : Optional[Any] = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
__magic_name__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
__magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> int:
pass
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> int:
pass
def __lowerCAmelCase ( self : str ) -> Tuple:
        # The default common tokenizer tests use invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__magic_name__ : int = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__magic_name__ : List[str] = 0
__magic_name__ : str = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] )
setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 331 | 1 |
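# A small sanity sketch of the byte-level scheme the ByT5 tests above rely on:
# ids 0-2 are reserved (pad=0, eos=1, unk=2), so each UTF-8 byte b maps to id
# b + 3 -- which matches the hard-coded id lists in the tests ('U' -> 85 + 3 = 88).
from transformers import ByT5Tokenizer

tokenizer = ByT5Tokenizer()
ids = tokenizer("hi")["input_ids"]
assert ids == [ord("h") + 3, ord("i") + 3, 1]  # the trailing 1 is the </s> id
assert tokenizer.decode(ids) == "hi</s>"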
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCAmelCase :List[str] = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
lowerCAmelCase :Any = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
lowerCAmelCase :Optional[int] = r'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) -> Dict:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def __lowerCAmelCase ( self : Tuple , _A : Optional[Any] , _A : List[Any] ) -> List[Any]:
__magic_name__ : List[str] = 0.0
for i, j in zip(_A , _A ):
n_correct += 1.0 if math_equivalence.is_equiv(_A , _A ) else 0.0
__magic_name__ : Dict = n_correct / len(_A )
return {
"accuracy": accuracy,
} | 331 |
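# Usage mirroring the metric's own docstring example above; it requires the
# extra dependency: pip install git+https://github.com/hendrycks/math.git
import datasets

metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
print(results)  # {'accuracy': 1.0}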
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
        # For consistency across the different places where the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only from integers.
__magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ : Dict = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
__magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]]
__magic_name__ : List[Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 )
__magic_name__ : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 )
__magic_name__ : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 )
__magic_name__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(_A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] ) | 331 | 1 |
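# A condensed sketch of the progression the tests above verify: the constraint
# advances token by token and completes as soon as any one branch is matched.
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
for token in (1, 2, 4):
    stepped, completed, reset = dc.update(token)
assert dc.completed and dc.current_seq == [1, 2, 4]
dc.reset()  # the constraint can then be reused for the next sequence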
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , _A : Optional[Any] , _A : Optional[Any]=13 , _A : int=7 , _A : List[Any]=True , _A : Tuple=True , _A : Dict=True , _A : List[Any]=True , _A : str=99 , _A : Union[str, Any]=32 , _A : Union[str, Any]=5 , _A : Dict=4 , _A : Optional[int]=37 , _A : Optional[int]="gelu" , _A : List[Any]=0.1 , _A : str=0.1 , _A : str=512 , _A : Any=16 , _A : Dict=2 , _A : Union[str, Any]=0.02 , _A : Dict=4 , ) -> Tuple:
__magic_name__ : List[Any] = parent
__magic_name__ : Union[str, Any] = batch_size
__magic_name__ : List[str] = seq_length
__magic_name__ : int = is_training
__magic_name__ : Dict = use_attention_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : Union[str, Any] = use_labels
__magic_name__ : Dict = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : List[str] = num_attention_heads
__magic_name__ : List[str] = intermediate_size
__magic_name__ : str = hidden_act
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : str = attention_probs_dropout_prob
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : int = type_sequence_label_size
__magic_name__ : List[Any] = initializer_range
__magic_name__ : Optional[Any] = num_choices
def __lowerCAmelCase ( self : int ) -> List[str]:
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[int] = None
if self.use_attention_mask:
__magic_name__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : Optional[int] = None
if self.use_token_type_ids:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : str = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
__magic_name__ : Optional[int] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[str] = config_and_inputs
__magic_name__ : str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = True
A_ : int = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __lowerCAmelCase ( self : Optional[int] ) -> str:
__magic_name__ : Dict = FlaxRoFormerModelTester(self )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
__magic_name__ : Optional[Any] = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=_A )
__magic_name__ : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(_A )
@require_flax
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
__magic_name__ : List[Any] = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__magic_name__ : Optional[Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Union[str, Any] = model(_A )[0]
__magic_name__ : Tuple = 50000
__magic_name__ : str = (1, 6, vocab_size)
self.assertEqual(output.shape , _A )
__magic_name__ : Dict = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , _A , atol=1E-4 ) ) | 331 |
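# A hedged sketch of the slow-test flow above: load the pretrained Flax
# RoFormer MLM head and check the logits shape ((1, 6, 50000) per the test).
import jax.numpy as jnp

from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
logits = model(input_ids)[0]
assert logits.shape == (1, 6, 50000)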
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase :List[str] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
lowerCAmelCase :List[Any] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Tuple = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Optional[Any] = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
lowerCAmelCase :Tuple = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Union[str, Any] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
lowerCAmelCase :Dict = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
lowerCAmelCase :Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
lowerCAmelCase :Any = ''''''
lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
__magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : str = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
__magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Any = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) | 331 | 1 |
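# A minimal sketch of the flow the parametrized tests above drive, reusing the
# fixture names referenced in the decorators (their assignments appear with
# placeholder targets in this dump, and the structure argument name follows
# the commented-out `example_yaml_structure` fixture). Assumed: `validate()`
# raises ValueError, the type the error-message fixtures are matched against.
from datasets.utils.readme import ReadMe

readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
assert readme.to_dict() == CORRECT_DICT  # parses into the nested-section dict

try:
    bad = ReadMe.from_string(README_NO_YAML, example_yaml_structure)
    bad.validate()
except ValueError as err:
    print(err)  # "...No YAML markers are present in the README."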