def decimal_to_binary(num: int) -> str:
    """
    Convert an integer to its binary representation as a string,
    prefixed with "0b" (or "-0b" for negative numbers).
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # save/load round-tripping is exercised by check_over_configs / check_over_forward
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config gives the same results
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="dpmsolver++",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)

    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
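
# Sketch of the config round-trip these tests exercise (directory path illustrative):
#
#     scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, solver_order=2)
#     scheduler.save_config("/tmp/dpm_single")
#     restored = DPMSolverSinglestepScheduler.from_pretrained("/tmp/dpm_single")
#     # `restored.config` matches `scheduler.config`, so step() outputs agree.
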
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
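
# Minimal usage sketch (checkpoint name illustrative, not pinned by this file):
#
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> BatchEncoding with `pixel_values` plus `input_ids`/`attention_mask`
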
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
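
# Note: with the `_LazyModule` registration above, submodules such as
# `modeling_deit` are only imported on first attribute access, so e.g.
# `from transformers.models.deit import DeiTConfig` stays cheap at import time.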
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    # override to speed up the overall test timing
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    # override to speed up the overall test timing
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
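
# End-to-end sketch of the pipeline under test (GPU assumed; produces a wide panorama):
#
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-base", safety_checker=None
#     ).to("cuda")
#     image = pipe("a photo of the dolomites").images[0]
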
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    """
    Video classification pipeline using any `AutoModelForVideoClassification`.
    This pipeline predicts the class of a video.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch

        return decrypted
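
# Usage sketch with a hypothetical 2x2 key (determinant 7, coprime with 36,
# so it passes check_determinant; padding repeats the last character, hence
# round-tripping "hello" yields "HELLOO"):
#
#     cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     cipher.decrypt(cipher.encrypt("hello"))  # -> "HELLOO"
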
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
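
# Usage sketch for the directed variant above (tiny 3-cycle):
#
#     g = DirectedGraph()
#     g.add_pair(0, 1)
#     g.add_pair(1, 2)
#     g.add_pair(2, 0)
#     g.dfs()        # -> [0, 1, 2]
#     g.has_cycle()  # -> True
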
class __snake_case :
def __init__( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = {}
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : Tuple=1):
"""simple docstring"""
if self.graph.get(_snake_case):
# if there already is a edge
if self.graph[u].count([w, v]) == 0:
self.graph[u].append([w, v])
else:
# if u does not exist
UpperCAmelCase_ = [[w, v]]
# add the other way
if self.graph.get(_snake_case):
# if there already is a edge
if self.graph[v].count([w, u]) == 0:
self.graph[v].append([w, u])
else:
# if u does not exist
UpperCAmelCase_ = [[w, u]]
def lowerCamelCase ( self : Dict , _snake_case : Optional[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
if self.graph.get(_snake_case):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_snake_case)
# the other way round
if self.graph.get(_snake_case):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_snake_case)
def lowerCamelCase ( self : int , _snake_case : List[Any]=-2 , _snake_case : List[Any]=-1):
"""simple docstring"""
if s == d:
return []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
if s == -2:
UpperCAmelCase_ = list(self.graph)[0]
stack.append(_snake_case)
visited.append(_snake_case)
UpperCAmelCase_ = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
UpperCAmelCase_ = s
for node in self.graph[s]:
if visited.count(node[1]) < 1:
if node[1] == d:
visited.append(_snake_case)
return visited
else:
stack.append(node[1])
visited.append(node[1])
UpperCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_snake_case) != 0:
UpperCAmelCase_ = stack[len(_snake_case) - 1]
else:
UpperCAmelCase_ = ss
            # check if we have reached the starting point
if len(_snake_case) == 0:
return visited
def lowerCamelCase ( self : int , _snake_case : Tuple=-1):
"""simple docstring"""
if c == -1:
UpperCAmelCase_ = floor(random() * 10000) + 10
for i in range(_snake_case):
            # every vertex has at most 102 edges
for _ in range(floor(random() * 102) + 1):
UpperCAmelCase_ = floor(random() * c) + 1
if n != i:
self.add_pair(_snake_case , _snake_case , 1)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Dict=-2):
"""simple docstring"""
UpperCAmelCase_ = deque()
UpperCAmelCase_ = []
if s == -2:
UpperCAmelCase_ = list(self.graph)[0]
d.append(_snake_case)
visited.append(_snake_case)
while d:
UpperCAmelCase_ = d.popleft()
if len(self.graph[s]) != 0:
for node in self.graph[s]:
if visited.count(node[1]) < 1:
d.append(node[1])
visited.append(node[1])
return visited
def lowerCamelCase ( self : List[str] , _snake_case : Tuple):
"""simple docstring"""
return len(self.graph[u])
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = list(self.graph)[0]
stack.append(_snake_case)
visited.append(_snake_case)
UpperCAmelCase_ = -2
UpperCAmelCase_ = []
UpperCAmelCase_ = s
UpperCAmelCase_ = False
UpperCAmelCase_ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
UpperCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
UpperCAmelCase_ = len(_snake_case) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1])
break
else:
anticipating_nodes.add(stack[len_stack])
len_stack -= 1
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
UpperCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ = True
if len(_snake_case) != 0:
UpperCAmelCase_ = stack[len(_snake_case) - 1]
else:
UpperCAmelCase_ = False
indirect_parents.append(_snake_case)
UpperCAmelCase_ = s
UpperCAmelCase_ = ss
            # check if we have reached the starting point
if len(_snake_case) == 0:
return list(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = list(self.graph)[0]
stack.append(_snake_case)
visited.append(_snake_case)
UpperCAmelCase_ = -2
UpperCAmelCase_ = []
UpperCAmelCase_ = s
UpperCAmelCase_ = False
UpperCAmelCase_ = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s]) != 0:
UpperCAmelCase_ = s
for node in self.graph[s]:
if (
visited.count(node[1]) > 0
and node[1] != parent
and indirect_parents.count(node[1]) > 0
and not on_the_way_back
):
UpperCAmelCase_ = len(_snake_case) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1])
break
else:
return True
if visited.count(node[1]) < 1:
stack.append(node[1])
visited.append(node[1])
UpperCAmelCase_ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
UpperCAmelCase_ = True
if len(_snake_case) != 0:
UpperCAmelCase_ = stack[len(_snake_case) - 1]
else:
UpperCAmelCase_ = False
indirect_parents.append(_snake_case)
UpperCAmelCase_ = s
UpperCAmelCase_ = ss
            # check if we have reached the starting point
if len(_snake_case) == 0:
return False
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return list(self.graph)
def lowerCamelCase ( self : List[str] , _snake_case : str=-2 , _snake_case : Optional[int]=-1):
"""simple docstring"""
UpperCAmelCase_ = time()
self.dfs(_snake_case , _snake_case)
UpperCAmelCase_ = time()
return end - begin
def lowerCamelCase ( self : Any , _snake_case : Tuple=-2):
"""simple docstring"""
UpperCAmelCase_ = time()
self.bfs(_snake_case)
UpperCAmelCase_ = time()
return end - begin
| 7 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
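# A worked illustration of the two strategies above: `number & (number - 1)`
# clears the lowest set bit, so the first loop runs once per set bit. For 25
# (binary 11001): 25 & 24 -> 24 (11000), 24 & 23 -> 16 (10000), 16 & 15 -> 0,
# i.e. three iterations for three set bits. The modulo variant instead walks
# all five bit positions and counts the three odd remainders.
assert bin(25).count("1") == 3  # sanity check using only builtins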
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__A )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
UpperCAmelCase_ = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__A , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 7 | 1 |
snake_case_ : int = {str(digit): digit**5 for digit in range(10)}
def A (__A : int ) -> int:
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(__A ) )
def A () -> int:
"""simple docstring"""
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(__A ) )
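# A quick sanity check of the digit-power identity the search above relies on:
# 4150 is one of the sought numbers, because 4**5 + 1**5 + 5**5 + 0**5
# = 1024 + 1 + 3125 + 0 = 4150.
assert sum(int(digit) ** 5 for digit in "4150") == 4150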
if __name__ == "__main__":
print(solution())
| 7 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
| 7 | 1 |
from __future__ import annotations
def A (__A : list[list[int]] ) -> bool:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
    # We need to create a solution object to save the path.
UpperCAmelCase_ = [[0 for _ in range(__A )] for _ in range(__A )]
UpperCAmelCase_ = run_maze(__A , 0 , 0 , __A )
if solved:
print('''\n'''.join(str(__A ) for row in solutions ) )
else:
print('''No solution exists!''' )
return solved
def A (__A : list[list[int]] , __A : int , __A : int , __A : list[list[int]] ) -> bool:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
# Final check point.
if i == j == (size - 1):
UpperCAmelCase_ = 1
return True
UpperCAmelCase_ = (not i < 0) and (not j < 0) # Check lower bounds
UpperCAmelCase_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
        # check for already-visited and blocked cells.
UpperCAmelCase_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCAmelCase_ = 1
# check for directions
if (
run_maze(__A , i + 1 , __A , __A )
or run_maze(__A , __A , j + 1 , __A )
or run_maze(__A , i - 1 , __A , __A )
or run_maze(__A , __A , j - 1 , __A )
):
return True
UpperCAmelCase_ = 0
return False
return False
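# A hypothetical 3x3 input (not taken from the tests) showing what the
# backtracking above computes: 0 marks an open cell, 1 a blocked cell, and the
# search tries down, right, up, left in that order, marking the single path
# from (0, 0) to (2, 2):
#
#   maze        solution
#   0 1 1       1 0 0
#   0 0 1       1 1 0
#   1 0 0       0 1 1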
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertEqual(_snake_case , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
    # fmt: off
    UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
    # fmt: on
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 7 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __snake_case :
def __init__( self : Union[str, Any] , _snake_case : int , _snake_case : List[str]=13 , _snake_case : Any=7 , _snake_case : List[Any]=True , _snake_case : Any=True , _snake_case : Tuple=True , _snake_case : Any=True , _snake_case : int=99 , _snake_case : Dict=32 , _snake_case : Optional[int]=2 , _snake_case : Optional[Any]=4 , _snake_case : str=37 , _snake_case : Dict="gelu" , _snake_case : List[str]=0.1 , _snake_case : Any=0.1 , _snake_case : Optional[int]=512 , _snake_case : str=16 , _snake_case : Union[str, Any]=2 , _snake_case : Optional[Any]=0.0_2 , _snake_case : int=3 , _snake_case : List[str]=4 , _snake_case : Any=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = 13
UpperCAmelCase_ = 7
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = 99
UpperCAmelCase_ = 384
UpperCAmelCase_ = 2
UpperCAmelCase_ = 4
UpperCAmelCase_ = 37
UpperCAmelCase_ = '''gelu'''
UpperCAmelCase_ = 0.1
UpperCAmelCase_ = 0.1
UpperCAmelCase_ = 512
UpperCAmelCase_ = 16
UpperCAmelCase_ = 2
UpperCAmelCase_ = 0.0_2
UpperCAmelCase_ = 3
UpperCAmelCase_ = 4
UpperCAmelCase_ = 128
UpperCAmelCase_ = 2
UpperCAmelCase_ = 9
UpperCAmelCase_ = 1
UpperCAmelCase_ = None
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : str , _snake_case : Any , _snake_case : Dict , _snake_case : str , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = TFConvBertModel(config=_snake_case)
UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ = [input_ids, input_mask]
UpperCAmelCase_ = model(_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : Dict , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Dict , _snake_case : str , _snake_case : Optional[int] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = TFConvBertForMaskedLM(config=_snake_case)
UpperCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : int , _snake_case : List[str] , _snake_case : str , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFConvBertForSequenceClassification(config=_snake_case)
UpperCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = TFConvBertForMultipleChoice(config=_snake_case)
UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1))
UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1))
UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1))
UpperCAmelCase_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowerCamelCase ( self : Tuple , _snake_case : Any , _snake_case : str , _snake_case : List[str] , _snake_case : int , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFConvBertForTokenClassification(config=_snake_case)
UpperCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase ( self : Tuple , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : str , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = TFConvBertForQuestionAnswering(config=_snake_case)
UpperCAmelCase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : List[Any] = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[str] = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TFConvBertModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case)
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = True
if hasattr(_snake_case , '''use_cache'''):
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
UpperCAmelCase_ = getattr(self.model_tester , '''key_length''' , _snake_case)
for model_class in self.all_model_classes:
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = len(model(_snake_case))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case , saved_model=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''saved_model''' , '''1''')
UpperCAmelCase_ = tf.keras.models.load_model(_snake_case)
UpperCAmelCase_ = model(_snake_case)
if self.is_encoder_decoder:
UpperCAmelCase_ = outputs['''encoder_hidden_states''']
UpperCAmelCase_ = outputs['''encoder_attentions''']
else:
UpperCAmelCase_ = outputs['''hidden_states''']
UpperCAmelCase_ = outputs['''attentions''']
self.assertEqual(len(_snake_case) , _snake_case)
UpperCAmelCase_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_snake_case) , _snake_case)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
UpperCAmelCase_ = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length)
UpperCAmelCase_ = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
UpperCAmelCase_ = getattr(self.model_tester , '''key_length''' , _snake_case)
UpperCAmelCase_ = getattr(self.model_tester , '''key_length''' , _snake_case)
def check_decoder_attentions_output(_snake_case : List[str]):
UpperCAmelCase_ = len(_snake_case)
self.assertEqual(out_len % 2 , 0)
UpperCAmelCase_ = outputs.decoder_attentions
self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_snake_case : Optional[Any]):
UpperCAmelCase_ = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_snake_case) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = len(_snake_case)
self.assertEqual(config.output_hidden_states , _snake_case)
check_encoder_attentions_output(_snake_case)
if self.is_encoder_decoder:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case))
self.assertEqual(config.output_hidden_states , _snake_case)
check_decoder_attentions_output(_snake_case)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case))
self.assertEqual(config.output_hidden_states , _snake_case)
check_encoder_attentions_output(_snake_case)
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(self._prepare_for_class(_snake_case , _snake_case))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_snake_case))
self.assertEqual(model.config.output_hidden_states , _snake_case)
check_encoder_attentions_output(_snake_case)
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
UpperCAmelCase_ = tf.constant([[0, 1, 2, 3, 4, 5]])
UpperCAmelCase_ = model(_snake_case)[0]
UpperCAmelCase_ = [1, 6, 768]
self.assertEqual(output.shape , _snake_case)
UpperCAmelCase_ = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _snake_case , atol=1e-4)
| 7 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
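# A minimal usage sketch for this pipeline; the checkpoint name is only an
# example of a VQA model hosted on the Hub, and any compatible checkpoint works:
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="path/to/photo.jpg", question="What is on the table?", top_k=3)
#
# The call returns a list of {"score": ..., "answer": ...} dicts, matching the
# shape produced by `postprocess` above.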
| 7 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class __snake_case ( a ):
UpperCAmelCase__ : Union[List[PIL.Image.Image], np.ndarray]
UpperCAmelCase__ : Optional[List[bool]]
UpperCAmelCase__ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
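# A short usage sketch for the stage-I text-to-image pipeline exported above;
# the checkpoint name is an assumption (the official DeepFloyd IF weights are
# gated on the Hub and require accepting a license):
#
#   from diffusers import IFPipeline
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#   image = pipe("a photo of an astronaut riding a horse").images[0]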
| 7 |
import sys
def A (__A : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
for chain_length in range(2 , __A ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__A , __A ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
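# The loops above fill the standard matrix-chain recurrence: with dimensions
# array[0..n-1] (matrix i being array[i-1] x array[i]),
#   matrix[a][b] = min over a <= c < b of
#                  matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# and sol[a][b] remembers the split point c that achieved the minimum. For the
# sample chain [30, 35, 15, 5, 10, 20, 25] used by the driver below, the
# optimal cost matrix[1][6] works out to 15125 (the classic CLRS example).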
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if i == j:
print('''A''' + str(__A ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optiomal_solution(__A , __A , optimal_solution[i][j] )
print_optiomal_solution(__A , optimal_solution[i][j] + 1 , __A )
print(''')''' , end=''' ''' )
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )
    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
print_optiomal_solution(__A , 1 , n - 1 )
if __name__ == "__main__":
main()
| 7 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class __snake_case ( pl.LightningModule ):
def __init__( self : str , _snake_case : List[str]):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ = model
UpperCAmelCase_ = 2
UpperCAmelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels)
def lowerCamelCase ( self : int):
"""simple docstring"""
pass
def A (__A : str , __A : str , __A : str ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = LongformerModel.from_pretrained(__A )
UpperCAmelCase_ = LightningModel(__A )
UpperCAmelCase_ = torch.load(__A , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
UpperCAmelCase_ = LongformerForQuestionAnswering.from_pretrained(__A )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__A )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
snake_case_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case_ : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
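# A typical invocation; the script file name and all paths are placeholders:
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./converted_model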
| 7 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
    # not used in modeling files, but it's important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
    # used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
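# An illustration of the multi-line case the regex above is meant to catch
# (a hypothetical snippet): black may split a getattr call across lines, e.g.
#
#   hidden_size = getattr(
#       config, "hidden_size"
#   )
#
# which the plain substring checks would miss, but the whitespace-tolerant
# regex still matches.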
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
| 7 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class __snake_case ( nn.Module ):
def __init__( self : List[Any]):
"""simple docstring"""
super().__init__()
UpperCAmelCase_ = nn.Linear(3 , 4)
UpperCAmelCase_ = nn.BatchNormad(4)
UpperCAmelCase_ = nn.Linear(4 , 5)
def lowerCamelCase ( self : Dict , _snake_case : Union[str, Any]):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(_snake_case)))
class __snake_case ( a ):
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Tuple , *_snake_case : int , **_snake_case : List[Any]):
"""simple docstring"""
return (args[0] + 1,) + args[1:], kwargs
class __snake_case ( a ):
def lowerCamelCase ( self : Any , _snake_case : List[str] , _snake_case : Any):
"""simple docstring"""
return output + 1
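# Conceptually, add_hook_to_module swaps the module's forward for a wrapper
# along the lines of the simplified sketch below (the real accelerate
# implementation also handles device placement and gradient modes):
#
#   def wrapped_forward(*args, **kwargs):
#       args, kwargs = hook.pre_forward(module, *args, **kwargs)  # PreForwardHook edits inputs
#       output = module._old_forward(*args, **kwargs)
#       return hook.post_forward(module, output)  # PostForwardHook edits the output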
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_snake_case , _snake_case)
self.assertEqual(test_model._hf_hook , _snake_case)
self.assertTrue(hasattr(_snake_case , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_snake_case)
self.assertFalse(hasattr(_snake_case , '''_hf_hook'''))
self.assertFalse(hasattr(_snake_case , '''_old_forward'''))
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = ModelHook()
add_hook_to_module(_snake_case , _snake_case)
add_hook_to_module(_snake_case , _snake_case , append=_snake_case)
self.assertEqual(isinstance(test_model._hf_hook , _snake_case) , _snake_case)
self.assertEqual(len(test_model._hf_hook.hooks) , 2)
self.assertTrue(hasattr(_snake_case , '''_old_forward'''))
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , '''forward''')
self.assertListEqual(list(inspect.signature(test_model.forward).parameters) , ['''x'''])
remove_hook_from_module(_snake_case)
self.assertFalse(hasattr(_snake_case , '''_hf_hook'''))
self.assertFalse(hasattr(_snake_case , '''_old_forward'''))
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(x + 1)
UpperCAmelCase_ = test_model(x + 2)
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_snake_case , _snake_case)
UpperCAmelCase_ = test_model(_snake_case)
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-5))
        # Attaching a hook to a model that already has one replaces it; hooks do not chain
UpperCAmelCase_ = PreForwardHook()
add_hook_to_module(_snake_case , _snake_case)
UpperCAmelCase_ = test_model(_snake_case)
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PreForwardHook() , PreForwardHook())
add_hook_to_module(_snake_case , _snake_case)
UpperCAmelCase_ = test_model(_snake_case)
assert torch.allclose(_snake_case , _snake_case , atol=1e-5)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_snake_case)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_snake_case , _snake_case)
UpperCAmelCase_ = test_model(_snake_case)
self.assertTrue(torch.allclose(_snake_case , output + 1 , atol=1e-5))
        # Attaching a new hook to a model that already has one replaces the old hook; it does not chain them
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_snake_case , _snake_case)
UpperCAmelCase_ = test_model(_snake_case)
self.assertTrue(torch.allclose(_snake_case , output + 1 , atol=1e-5))
# You need to use the sequential hook to chain two or more hooks
UpperCAmelCase_ = SequentialHook(PostForwardHook() , PostForwardHook())
add_hook_to_module(_snake_case , _snake_case)
UpperCAmelCase_ = test_model(_snake_case)
assert torch.allclose(_snake_case , output + 2 , atol=1e-5)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = test_model(_snake_case)
UpperCAmelCase_ = PostForwardHook()
add_hook_to_module(_snake_case , _snake_case)
UpperCAmelCase_ = test_model(_snake_case)
self.assertTrue(torch.allclose(_snake_case , output + 1))
self.assertTrue(outputa.requires_grad)
UpperCAmelCase_ = True
UpperCAmelCase_ = test_model(_snake_case)
self.assertFalse(outputa.requires_grad)
@require_multi_gpu
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule onto a different device
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0))
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1))
self.assertEqual(model.lineara.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.weight.device , torch.device(0))
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0))
self.assertEqual(model.lineara.weight.device , torch.device(1))
# We can still make a forward pass. The input does not need to be on any particular device
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , torch.device(1))
        # We can add a general hook to put the output back on the same device as the input.
add_hook_to_module(_snake_case , AlignDevicesHook(io_same_device=_snake_case))
UpperCAmelCase_ = torch.randn(2 , 3).to(0)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , torch.device(0))
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule onto a different device
UpperCAmelCase_ = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True}
add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_snake_case))
add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case))
        # Parameters have been offloaded, so they are on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
        # Buffers are not included in the offload by default, so they stay on the execution device
UpperCAmelCase_ = torch.device(hook_kwargs['''execution_device'''])
self.assertEqual(model.batchnorm.running_mean.device , _snake_case)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , _snake_case)
        # Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
UpperCAmelCase_ = {
'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''',
'''offload''': True,
'''offload_buffers''': True,
}
add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case))
add_hook_to_module(model.batchnorm , AlignDevicesHook(**_snake_case))
add_hook_to_module(model.lineara , AlignDevicesHook(**_snake_case))
        # Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , _snake_case)
        # Removing the hooks loads the weights back into the model.
remove_hook_from_module(model.lineara)
remove_hook_from_module(model.batchnorm)
remove_hook_from_module(model.lineara)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule onto a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(_snake_case , execution_device=_snake_case , offload=_snake_case)
        # Parameters have been offloaded, so they are on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
        # Buffers are not included in the offload by default, so they stay on the execution device
UpperCAmelCase_ = torch.device(_snake_case)
self.assertEqual(model.batchnorm.running_mean.device , _snake_case)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , _snake_case)
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(_snake_case)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(_snake_case , execution_device=_snake_case , offload=_snake_case , offload_buffers=_snake_case)
        # Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , _snake_case)
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(_snake_case)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
        # This will move each submodule onto a different device
UpperCAmelCase_ = 0 if torch.cuda.is_available() else '''cpu'''
attach_align_device_hook(
_snake_case , execution_device=_snake_case , offload=_snake_case , weights_map=model.state_dict())
        # Parameters have been offloaded, so they are on the meta device
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
        # Buffers are not included in the offload by default, so they stay on the execution device
UpperCAmelCase_ = torch.device(_snake_case)
self.assertEqual(model.batchnorm.running_mean.device , _snake_case)
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , _snake_case)
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(_snake_case)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
# Now test with buffers included in the offload
attach_align_device_hook(
_snake_case , execution_device=_snake_case , offload=_snake_case , weights_map=model.state_dict() , offload_buffers=_snake_case , )
        # Parameters have been offloaded, so they are on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''meta'''))
self.assertEqual(model.lineara.weight.device , torch.device('''meta'''))
self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta'''))
UpperCAmelCase_ = torch.randn(2 , 3)
UpperCAmelCase_ = model(_snake_case)
self.assertEqual(output.device , _snake_case)
        # Removing the hooks loads the weights back into the model.
remove_hook_from_submodules(_snake_case)
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu'''))
self.assertEqual(model.lineara.weight.device , torch.device('''cpu'''))
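# A standalone usage sketch of the pre-forward hook mechanism exercised above,
# outside of unittest. It assumes torch and accelerate are installed; the
# `ShiftInputHook` name is hypothetical.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module
class ShiftInputHook(ModelHook):
    # pre_forward may rewrite the (args, kwargs) that the wrapped forward receives
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
add_hook_to_module(layer, ShiftInputHook())
hooked = layer(x)                # the hook shifts the input, so this equals layer(x + 1)
remove_hook_from_module(layer)   # restores the original forward
assert torch.allclose(hooked, layer(x + 1), atol=1e-5)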
| 7 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( FlaxModelTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        init_dict = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
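# A small, self-contained sketch of how the fixtures above are usually consumed
# (assumes jax is installed; the model-application lines are left as comments
# because the exact apply signature depends on the diffusers version):
import jax
rng = jax.random.PRNGKey(0)
sample = jax.random.uniform(rng, (4, 3, 32, 32))  # (batch, channels, height, width)
# model = FlaxAutoencoderKL(**init_dict)              # hypothetical wiring
# params = model.init_weights(rng)                    # hypothetical; see FlaxModelMixin
# reconstruction = model.apply({"params": params}, sample)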
| 7 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : Dict = BertJapaneseTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def lowerCamelCase ( self : Dict , _snake_case : List[Any]):
"""simple docstring"""
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
    def lowerCamelCase ( self : Optional[Any] , tokenizer : List[Any]):
        """simple docstring"""
        input_text , output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text , add_special_tokens=False)
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False)
        return text, ids
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self : str):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file)
UpperCAmelCase_ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''')
self.assertListEqual(_snake_case , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''')
self.assertIsNotNone(_snake_case)
UpperCAmelCase_ = '''こんにちは、世界。\nこんばんは、世界。'''
UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
self.assertListEqual(_snake_case , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
UpperCAmelCase_ = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(_snake_case , '''wb''') as handle:
pickle.dump(_snake_case , _snake_case)
with open(_snake_case , '''rb''') as handle:
UpperCAmelCase_ = pickle.load(_snake_case)
UpperCAmelCase_ = tokenizer_new.tokenize(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = MecabTokenizer(mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
try:
UpperCAmelCase_ = MecabTokenizer(mecab_dic='''unidic_lite''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
try:
UpperCAmelCase_ = MecabTokenizer(mecab_dic='''unidic''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = MecabTokenizer(do_lower_case=_snake_case , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def lowerCamelCase ( self : int):
"""simple docstring"""
try:
UpperCAmelCase_ = MecabTokenizer(
do_lower_case=_snake_case , normalize_text=_snake_case , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''')
except RuntimeError:
            # If the dictionary doesn't exist on the system, the code above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = MecabTokenizer(normalize_text=_snake_case , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''')
self.assertIsNotNone(_snake_case)
UpperCAmelCase_ = '''こんにちは、世界。\nこんばんは、世界。'''
UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
self.assertListEqual(_snake_case , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
UpperCAmelCase_ = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(_snake_case , '''wb''') as handle:
pickle.dump(_snake_case , _snake_case)
with open(_snake_case , '''rb''') as handle:
UpperCAmelCase_ = pickle.load(_snake_case)
UpperCAmelCase_ = tokenizer_new.tokenize(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
@require_sudachi
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国''', '''人''', '''参政''', '''権'''])
@require_sudachi
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人''', '''参政権'''])
@require_sudachi
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人参政権'''])
@require_sudachi
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = SudachiTokenizer(do_lower_case=_snake_case , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = SudachiTokenizer(normalize_text=_snake_case , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = SudachiTokenizer(trim_whitespace=_snake_case , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''')
self.assertIsNotNone(_snake_case)
UpperCAmelCase_ = '''こんにちは、世界。\nこんばんは、世界。'''
UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
self.assertListEqual(_snake_case , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , [3, 12, 10, 14, 4, 9, 12, 10, 14])
UpperCAmelCase_ = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(_snake_case , '''wb''') as handle:
pickle.dump(_snake_case , _snake_case)
with open(_snake_case , '''rb''') as handle:
UpperCAmelCase_ = pickle.load(_snake_case)
UpperCAmelCase_ = tokenizer_new.tokenize(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
@require_jumanpp
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = JumanppTokenizer(do_lower_case=_snake_case)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = JumanppTokenizer(normalize_text=_snake_case)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = JumanppTokenizer(trim_whitespace=_snake_case)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''') , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
UpperCAmelCase_ = {}
for i, token in enumerate(_snake_case):
UpperCAmelCase_ = i
UpperCAmelCase_ = WordpieceTokenizer(vocab=_snake_case , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こんにちは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは''') , ['''こん''', '''##ばんは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''') , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''')
UpperCAmelCase_ = tokenizer.subword_tokenizer
UpperCAmelCase_ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''')
self.assertListEqual(_snake_case , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''])
UpperCAmelCase_ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''')
self.assertListEqual(_snake_case , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''])
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''')
UpperCAmelCase_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_snake_case)
UpperCAmelCase_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_snake_case)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_snake_case)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = BertJapaneseTokenizer
UpperCAmelCase__ : Optional[int] = False
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def lowerCamelCase ( self : Dict , **_snake_case : Any):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **_snake_case)
def lowerCamelCase ( self : Dict , _snake_case : Tuple):
"""simple docstring"""
        input_text = '''こんにちは、世界。 \nこんばんは、世界。'''
        output_text = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def lowerCamelCase ( self : str):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self : int):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self : int):
"""simple docstring"""
pass # TODO add if relevant
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''')
UpperCAmelCase_ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''')
self.assertListEqual(
_snake_case , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
UpperCAmelCase_ = {}
for i, token in enumerate(_snake_case):
UpperCAmelCase_ = i
UpperCAmelCase_ = CharacterTokenizer(vocab=_snake_case , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''])
self.assertListEqual(tokenizer.tokenize('''こんにちほ''') , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''])
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''')
UpperCAmelCase_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_snake_case)
UpperCAmelCase_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_snake_case)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_snake_case)
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_snake_case , _snake_case)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = '''cl-tohoku/bert-base-japanese'''
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertTokenizer.from_pretrained(_snake_case)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
UpperCAmelCase_ = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertJapaneseTokenizer.from_pretrained(_snake_case)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
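# The mecab/sudachi/jumanpp tests above all repeat the same pickle round-trip
# check. A dependency-free sketch of that pattern, with a plain dict standing
# in for the tokenizer (hypothetical data):
import os
import pickle
import tempfile
original = {"word_tokenizer_type": "mecab", "do_lower_case": False}
path = os.path.join(tempfile.mkdtemp(), "tokenizer.bin")
with open(path, "wb") as handle:
    pickle.dump(original, handle)
with open(path, "rb") as handle:
    restored = pickle.load(handle)
assert restored == original  # behaviour must survive serialization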
| 7 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        config.push_to_hub('''test-config''' , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , repo_id='''test-config''' , push_to_hub=True , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F"""{USER}/test-config""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
        config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='''valid_org/test-config-org''' , push_to_hub=True , use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
        new_config = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''')
self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config , key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                F""" {", ".join(keys_with_defaults)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
        self.assertIsNotNone(config)
def lowerCamelCase ( self : Any):
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=response_mock) as mock_head:
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
        # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
        configuration = AutoConfig.from_pretrained('''bert-base-cased''')
        configuration.configuration_files = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() , open(os.path.join(tmp_dir , '''config.4.0.0.json''') , '''w'''))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir , '''config.4.0.0.json''') , os.path.join(tmp_dir , '''config.42.0.0.json'''))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
self.assertEqual(new_configuration.hidden_size , 768)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
        repo = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo , return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs , {})
        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size , 768)
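# A dependency-free sketch of what `update_from_string` does in the test above:
# parse a "key=value,key=value" string and coerce each value to the type of the
# existing attribute. Simplified; the real PretrainedConfig method also handles
# floats and more value spellings. `TinyConfig` is hypothetical.
class TinyConfig:
    def __init__(self):
        self.n_embd = 768
        self.scale_attn_weights = True
    def update_from_string(self, spec):
        for pair in spec.split(","):
            key, value = pair.split("=")
            current = getattr(self, key)
            if isinstance(current, bool):  # check bool before int: bool is a subclass of int
                setattr(self, key, value.lower() in ("true", "1", "yes"))
            elif isinstance(current, int):
                setattr(self, key, int(value))
            else:
                setattr(self, key, value)
c = TinyConfig()
c.update_from_string("n_embd=1024,scale_attn_weights=false")
assert c.n_embd == 1024 and c.scale_attn_weights is False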
| 7 | 1 |
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    def __init__( self , list_of_points : list[tuple[float, float]]):
        """simple docstring"""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1
    def basis_function( self , t : float):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree , i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values) , 5) == 1
        return output_values
    def bezier_curve_function( self , t : float):
        """simple docstring"""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size : float = 0.0_1):
        """simple docstring"""
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x = []  # x coordinates of points to plot
        to_plot_y = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color='''blue''' , label='''Curve of Degree ''' + str(self.degree) , )
        plt.scatter(x , y , color='''red''' , label='''Control Points''')
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
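# A quick numeric sanity check of the class above, without plotting: a degree-1
# Bezier curve between (0, 0) and (10, 10) is a straight line, so t = 0.5 must
# land exactly on the midpoint.
line = BezierCurve([(0.0, 0.0), (10.0, 10.0)])
assert line.basis_function(0.5) == [0.5, 0.5]
assert line.bezier_curve_function(0.5) == (5.0, 5.0)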
| 7 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node :
    data : int
    next_node : Node | None
class SortedLinkedList :
    def __init__( self , ints : Iterable[int]):
        """simple docstring"""
        self.head : Node | None = None
        for i in sorted(ints , reverse=True):
            self.head = Node(i , self.head)
def __iter__( self : Dict):
"""simple docstring"""
        node = self.head
        while node:
            yield node.data
            node = node.next_node
def __len__( self : int):
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self : Optional[Any]):
"""simple docstring"""
return " -> ".join([str(_snake_case) for node in self])
def merge_lists(sll_one : SortedLinkedList , sll_two : SortedLinkedList ) -> SortedLinkedList:
    """simple docstring"""
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
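# A tiny worked example of the classes above: construction sorts the input,
# and merging simply re-sorts the concatenation of both lists.
merged = merge_lists(SortedLinkedList([3, 1]), SortedLinkedList([2, 0]))
assert str(merged) == "0 -> 1 -> 2 -> 3"
assert len(merged) == 4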
| 7 | 1 |
def lucas_lehmer_test(p : int ) -> bool:
"""simple docstring"""
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
UpperCAmelCase_ = 4
UpperCAmelCase_ = (1 << p) - 1
for _ in range(p - 2 ):
UpperCAmelCase_ = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
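# Worked check of the test above: 2**7 - 1 = 127 is a Mersenne prime while
# 2**11 - 1 = 2047 = 23 * 89 is composite, so the two calls above print
# True and then False.
assert lucas_lehmer_test(3)       # 2**3 - 1 = 7 is prime
assert not lucas_lehmer_test(11)  # 2**11 - 1 = 2047 = 23 * 89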
| 7 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class RagTokenizer :
    def __init__( self , question_encoder , generator):
        """simple docstring"""
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory):
        """simple docstring"""
        if os.path.isfile(save_directory):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
        os.makedirs(save_directory , exist_ok=True)
        question_encoder_path = os.path.join(save_directory , '''question_encoder_tokenizer''')
        generator_path = os.path.join(save_directory , '''generator_tokenizer''')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs):
        """simple docstring"""
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('''config''' , None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='''generator_tokenizer''')
        return cls(question_encoder=question_encoder , generator=generator)
    def __call__( self , *args , **kwargs):
        """simple docstring"""
        return self.current_tokenizer(*args , **kwargs)
    def batch_decode( self , *args , **kwargs):
        """simple docstring"""
        return self.generator.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs):
        """simple docstring"""
        return self.generator.decode(*args , **kwargs)
    def _switch_to_input_mode( self ):
        """simple docstring"""
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        """simple docstring"""
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts : List[str] , tgt_texts : Optional[List[str]] = None , max_length : Optional[int] = None , max_target_length : Optional[int] = None , padding : str = "longest" , return_tensors : str = None , truncation : bool = True , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['''labels'''] = labels['''input_ids''']
        return model_inputs
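# A short usage sketch of the wrapper above (assumes transformers is installed
# and the checkpoint is reachable; kept as comments so nothing runs on import):
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-base")
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#   assert tokenizer.current_tokenizer is tokenizer.question_encoder  # default mode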
| 7 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
snake_case_ : Optional[int] = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ) -> None:
    """simple docstring"""
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''''''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {os.path.abspath(pytorch_config_dump_path )}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
snake_case_ : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
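# Example invocation of the conversion script above (the script filename and
# all paths are placeholders):
#
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-base-cased-pytorch \
#       --finetuning_task sts-b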
| 7 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape)
# compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
        model = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
            output = model(input_ids)['''last_hidden_state'''].detach()
        self.assertEqual(output.shape , expected_output_shape)
# compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
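# A dependency-light sketch of the comparison pattern used above: keep only the
# last entry of the hidden dimension at every position and compare that slice
# to a stored reference within a small tolerance (synthetic tensors here):
import torch
output = torch.ones(1, 12, 768)
expected_output_values_last_dim = torch.ones(1, 12)
assert output[:, :, -1].shape == expected_output_values_last_dim.shape
assert torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)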
| 7 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 0
@slow
def lowerCamelCase ( self : Any):
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer , (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer) , 0)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 20)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config , RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config)
        self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def lowerCamelCase ( self : str):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''' , use_fast=False)
            self.assertIsInstance(tokenizer , BertTokenizer)
with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json'''))
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''' , use_fast=False)
            self.assertIsInstance(tokenizer , GPT2Tokenizer)
@require_tokenizers
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(tmp_dir , '''vocab.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''bert''')
            self.assertIsInstance(tokenizer , BertTokenizerFast)
with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(tmp_dir , '''vocab.json'''))
            shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(tmp_dir , '''merges.txt'''))
            tokenizer = AutoTokenizer.from_pretrained(tmp_dir , tokenizer_type='''gpt2''')
            self.assertIsInstance(tokenizer , GPT2TokenizerFast)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
with pytest.raises(_snake_case):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''')
@require_tokenizers
def lowerCamelCase ( self : str):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''')
self.assertIsInstance(_snake_case , (BertTokenizer, BertTokenizerFast))
if isinstance(_snake_case , _snake_case):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _snake_case)
else:
self.assertEqual(tokenizer.do_lower_case , _snake_case)
self.assertEqual(tokenizer.model_max_length , 512)
@require_tokenizers
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_snake_case , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
UpperCAmelCase_ = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKENIZER_MAPPING.values()
UpperCAmelCase_ = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_snake_case)
@require_tokenizers
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_snake_case) , _snake_case)
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''') , _snake_case)
@require_tokenizers
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_snake_case)
UpperCAmelCase_ = '''Hello, world. How are you?'''
UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
self.assertEqual('''[UNK]''' , tokens[0])
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_snake_case)
UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
self.assertEqual('''[UNK]''' , tokens[0])
@require_tokenizers
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''')
self.assertEqual(type(_snake_case) , _snake_case)
self.assertEqual(tokenizer.model_max_length , 512)
self.assertEqual(tokenizer.vocab_size , 30000)
self.assertEqual(tokenizer.unk_token , '''[UNK]''')
self.assertEqual(tokenizer.padding_side , '''right''')
self.assertEqual(tokenizer.truncation_side , '''right''')
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 12)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''ctrl''')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_snake_case , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = get_tokenizer_config('''bert-base-cased''')
UpperCAmelCase_ = config.pop('''_commit_hash''' , _snake_case)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_snake_case , {'''do_lower_case''': False})
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ = get_tokenizer_config(_snake_case)
self.assertDictEqual(_snake_case , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = get_tokenizer_config(_snake_case)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''')
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _snake_case)
AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_snake_case):
AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case)
UpperCAmelCase_ = CustomTokenizer.from_pretrained(_snake_case)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCamelCase ( self : str):
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _snake_case)
# Can register in two steps
AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_snake_case , fast_tokenizer_class=_snake_case)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_snake_case , slow_tokenizer_class=_snake_case , fast_tokenizer_class=_snake_case)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_snake_case):
AutoTokenizer.register(_snake_case , fast_tokenizer_class=_snake_case)
            # We route through a BERT fast tokenizer because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = BertTokenizerFast.from_pretrained(_snake_case)
bert_tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = CustomTokenizerFast.from_pretrained(_snake_case)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , use_fast=_snake_case)
self.assertIsInstance(_snake_case , _snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
with self.assertRaises(_snake_case):
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_snake_case):
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , trust_remote_code=_snake_case)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case , use_fast=_snake_case)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , trust_remote_code=_snake_case , use_fast=_snake_case)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
@require_tokenizers
def lowerCamelCase ( self : Any):
"""simple docstring"""
class __snake_case ( a ):
UpperCAmelCase__ : Dict = False
class __snake_case ( a ):
UpperCAmelCase__ : List[str] = NewTokenizer
UpperCAmelCase__ : Tuple = False
try:
AutoConfig.register('''custom''' , _snake_case)
AutoTokenizer.register(_snake_case , slow_tokenizer_class=_snake_case)
AutoTokenizer.register(_snake_case , fast_tokenizer_class=_snake_case)
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_snake_case)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case , use_fast=_snake_case)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
            # If remote code is enabled, we load from the Hub
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertTrue(tokenizer.special_attribute_present)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_snake_case , use_fast=_snake_case)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_snake_case)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_snake_case , use_fast=_snake_case)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
def lowerCamelCase ( self : int):
"""simple docstring"""
with self.assertRaisesRegex(
_snake_case , '''bert-base is not a local folder and is not a valid model identifier'''):
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''bert-base''')
def lowerCamelCase ( self : str):
"""simple docstring"""
with self.assertRaisesRegex(
_snake_case , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_snake_case , revision='''aaaaaa''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
with RequestCounter() as counter:
UpperCAmelCase_ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 7 |
from maths.prime_factors import prime_factors
def A (__A : int ) -> int:
"""simple docstring"""
if not isinstance(__A , __A ):
UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer"""
raise TypeError(__A )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(__A ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __snake_case :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class __snake_case :
def __init__( self : Optional[int] , _snake_case : Iterable[int]):
"""simple docstring"""
UpperCAmelCase_ = None
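        # Prepending items from a reverse-sorted iterable keeps the resulting
        # linked list in ascending order.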
for i in sorted(_snake_case , reverse=_snake_case):
UpperCAmelCase_ = Node(_snake_case , self.head)
def __iter__( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.head
while node:
yield node.data
UpperCAmelCase_ = node.next_node
def __len__( self : int):
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self : Optional[Any]):
"""simple docstring"""
return " -> ".join([str(_snake_case) for node in self])
def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(__A ) + list(__A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 7 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7 | 1 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : int = Dict[str, Any]
snake_case_ : Optional[int] = List[Prediction]
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : str , *_snake_case : List[str] , **_snake_case : Any):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , '''vision''')
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
def lowerCamelCase ( self : int , **_snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {}
if "threshold" in kwargs:
UpperCAmelCase_ = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self : Optional[int] , *_snake_case : List[Any] , **_snake_case : str):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case)
def lowerCamelCase ( self : Dict , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = load_image(_snake_case)
UpperCAmelCase_ = torch.IntTensor([[image.height, image.width]])
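        # Remember the original (height, width) so predicted boxes can be rescaled later.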
UpperCAmelCase_ = self.image_processor(images=[image] , return_tensors='''pt''')
if self.tokenizer is not None:
UpperCAmelCase_ = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''')
UpperCAmelCase_ = target_size
return inputs
def lowerCamelCase ( self : int , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = model_inputs.pop('''target_size''')
UpperCAmelCase_ = self.model(**_snake_case)
UpperCAmelCase_ = outputs.__class__({'''target_size''': target_size, **outputs})
if self.tokenizer is not None:
UpperCAmelCase_ = model_inputs['''bbox''']
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Any , _snake_case : List[str]=0.9):
"""simple docstring"""
UpperCAmelCase_ = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
UpperCAmelCase_ , UpperCAmelCase_ = target_size[0].tolist()
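            # LayoutLM-style boxes live on a 0-1000 normalised grid; scale them back to pixels.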
def unnormalize(_snake_case : Optional[int]):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
]))
UpperCAmelCase_ , UpperCAmelCase_ = model_outputs['''logits'''].squeeze(0).softmax(dim=-1).max(dim=-1)
UpperCAmelCase_ = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
UpperCAmelCase_ = [unnormalize(_snake_case) for bbox in model_outputs['''bbox'''].squeeze(0)]
UpperCAmelCase_ = ['''score''', '''label''', '''box''']
UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for vals in zip(scores.tolist() , _snake_case , _snake_case) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
UpperCAmelCase_ = self.image_processor.post_process_object_detection(_snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = raw_annotations[0]
UpperCAmelCase_ = raw_annotation['''scores''']
UpperCAmelCase_ = raw_annotation['''labels''']
UpperCAmelCase_ = raw_annotation['''boxes''']
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = [self.model.config.idalabel[label.item()] for label in labels]
UpperCAmelCase_ = [self._get_bounding_box(_snake_case) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
UpperCAmelCase_ = ['''score''', '''label''', '''box''']
UpperCAmelCase_ = [
dict(zip(_snake_case , _snake_case))
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''])
]
return annotation
def lowerCamelCase ( self : Union[str, Any] , _snake_case : "torch.Tensor"):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''')
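        # Convert an [xmin, ymin, xmax, ymax] tensor into the pipeline's box dict.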
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = box.int().tolist()
UpperCAmelCase_ = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 7 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A (__A : BertModel , __A : str , __A : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
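    # These kernels are stored transposed in TensorFlow relative to PyTorch,
    # so the exporter applies a .T before creating the TF variable.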
UpperCAmelCase_ = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__A ):
os.makedirs(__A )
UpperCAmelCase_ = model.state_dict()
def to_tf_var_name(__A : str ):
for patt, repl in iter(__A ):
UpperCAmelCase_ = name.replace(__A , __A )
return F"""bert/{name}"""
def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ):
UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() )
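        # Initialise the TF variable to zeros first; the real values are copied in
        # afterwards with tf.keras.backend.set_value.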
session.run(tf.variables_initializer([tf_var] ) )
session.run(__A )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ = to_tf_var_name(__A )
UpperCAmelCase_ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ = torch_tensor.T
UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A )
tf.keras.backend.set_value(__A , __A )
UpperCAmelCase_ = session.run(__A )
print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" )
UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() )
saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def A (__A : Any=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase_ = parser.parse_args(__A )
UpperCAmelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 7 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
snake_case_ : Optional[Any] = False
class __snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
UpperCAmelCase_ = torch.manual_seed(0)
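        # Fixed seed so the save/reload round-trip can be checked for identical outputs.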
UpperCAmelCase_ = pipe.dual_guided(
prompt='''first prompt''' , image=_snake_case , text_to_image_strength=0.7_5 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_snake_case)
UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained(_snake_case , torch_dtype=torch.floataa)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
UpperCAmelCase_ = generator.manual_seed(0)
UpperCAmelCase_ = pipe.dual_guided(
prompt='''first prompt''' , image=_snake_case , text_to_image_strength=0.7_5 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
UpperCAmelCase_ = '''cyberpunk 2077'''
UpperCAmelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = pipe.dual_guided(
prompt=_snake_case , image=_snake_case , text_to_image_strength=0.7_5 , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
UpperCAmelCase_ = '''A painting of a squirrel eating a burger '''
UpperCAmelCase_ = torch.manual_seed(0)
UpperCAmelCase_ = pipe.text_to_image(
prompt=_snake_case , generator=_snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''').images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
UpperCAmelCase_ = pipe.image_variation(_snake_case , generator=_snake_case , output_type='''numpy''').images
UpperCAmelCase_ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 7 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]):
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
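            # Expect one hidden state per stage plus the initial embedding output.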
self.assertEqual(len(_snake_case) , expected_num_stages + 1)
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = model_class(_snake_case)
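                # JIT-compile the forward pass and verify it matches eager execution.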
@jax.jit
def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]):
return model(pixel_values=_snake_case , **_snake_case)
with self.subTest('''JIT Enabled'''):
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case))
for jitted_output, output in zip(_snake_case , _snake_case):
self.assertEqual(jitted_output.shape , output.shape)
def A () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''')
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
| 7 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : Dict = logging.get_logger(__name__)
snake_case_ : int = {"vocab_file": "spiece.model"}
snake_case_ : List[str] = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
snake_case_ : Union[str, Any] = {"bert_for_seq_generation": 512}
class __snake_case ( a ):
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str , _snake_case : List[str] , _snake_case : Optional[int]="<s>" , _snake_case : int="</s>" , _snake_case : List[str]="<unk>" , _snake_case : Any="<pad>" , _snake_case : Tuple="<::::>" , _snake_case : Optional[Dict[str, Any]] = None , **_snake_case : Optional[int] , ):
"""simple docstring"""
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , sep_token=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_snake_case)
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = {self.convert_ids_to_tokens(_snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
UpperCAmelCase_ = {}
UpperCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCamelCase ( self : int , _snake_case : str):
"""simple docstring"""
return self.sp_model.encode(_snake_case , out_type=_snake_case)
def lowerCamelCase ( self : int , _snake_case : List[Any]):
"""simple docstring"""
return self.sp_model.piece_to_id(_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.sp_model.IdToPiece(_snake_case)
return token
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = ''''''
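        # Decode sub-token runs with SentencePiece, splicing special tokens back in verbatim.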
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_snake_case) + token
UpperCAmelCase_ = []
else:
current_sub_tokens.append(_snake_case)
out_string += self.sp_model.decode(_snake_case)
return out_string.strip()
def lowerCamelCase ( self : Union[str, Any] , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(_snake_case):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
UpperCAmelCase_ = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _snake_case)
elif not os.path.isfile(self.vocab_file):
with open(_snake_case , '''wb''') as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_snake_case)
return (out_vocab_file,)
| 7 |
import comet # From: unbabel-comet
import torch
import datasets
snake_case_ : Tuple = datasets.logging.get_logger(__name__)
snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Any):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence'''),
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
if self.config_name == "default":
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
else:
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name))
def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False):
"""simple docstring"""
if gpus is None:
UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())]
UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case)
return {"mean_score": mean_score, "scores": scores}
| 7 | 1 |
def A (__A : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {
'''^''': 3,
'''*''': 2,
'''/''': 2,
'''%''': 2,
'''+''': 1,
'''-''': 1,
} # Priority of each operator
UpperCAmelCase_ = len(__A ) if (len(__A ) > 7) else 7
# Print table header for output
print(
'''Symbol'''.center(8 ) , '''Stack'''.center(__A ) , '''Postfix'''.center(__A ) , sep=''' | ''' , )
print('''-''' * (print_width * 3 + 7) )
for x in infix:
if x.isalpha() or x.isdigit():
post_fix.append(__A ) # if x is Alphabet / Digit, add it to Postfix
elif x == "(":
stack.append(__A ) # if x is "(" push to Stack
elif x == ")": # if x is ")" pop stack until "(" is encountered
while stack[-1] != "(":
post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix
stack.pop()
else:
if len(__A ) == 0:
stack.append(__A ) # If stack is empty, push x to stack
else: # while priority of x is not > priority of element in the stack
while len(__A ) > 0 and priority[x] <= priority[stack[-1]]:
post_fix.append(stack.pop() ) # pop stack & add to Postfix
stack.append(__A ) # push x to stack
print(
x.center(8 ) , (''''''.join(__A )).ljust(__A ) , (''''''.join(__A )).ljust(__A ) , sep=''' | ''' , ) # Output in tabular format
while len(__A ) > 0: # while stack is not empty
post_fix.append(stack.pop() ) # pop stack & add to Postfix
print(
''' '''.center(8 ) , (''''''.join(__A )).ljust(__A ) , (''''''.join(__A )).ljust(__A ) , sep=''' | ''' , ) # Output in tabular format
return "".join(__A ) # return Postfix as str
def A (__A : List[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = list(infix[::-1] ) # reverse the infix equation
for i in range(len(__A ) ):
if infix[i] == "(":
UpperCAmelCase_ = ''')''' # change "(" to ")"
elif infix[i] == ")":
UpperCAmelCase_ = '''(''' # change ")" to "("
return (infix_2_postfix(''''''.join(__A ) ))[
::-1
] # call infix_2_postfix on Infix, return reverse of Postfix
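# Worked example (illustrative): for the infix input "a+b*c", infix_2_postfix
# yields "abc*+" and infix_2_prefix yields "+a*bc".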
if __name__ == "__main__":
snake_case_ : int = input("\nEnter an Infix Equation = ") # Input an Infix equation
snake_case_ : str = "".join(Infix.split()) # Remove spaces from the input
print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 7 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( a ):
UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,)
UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),)
def lowerCamelCase ( self : Dict , **_snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_snake_case)
return config
def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_snake_case , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]):
"""simple docstring"""
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
return sample
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3
def lowerCamelCase ( self : int):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
UpperCAmelCase_ = self.full_loop(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
assert not torch.isnan(_snake_case).any(), "Samples have nan numbers"
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lower_order_final=_snake_case)
self.check_over_configs(lower_order_final=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def lowerCamelCase ( self : int):
"""simple docstring"""
self.check_over_configs(variance_type=_snake_case)
self.check_over_configs(variance_type='''learned_range''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_snake_case , time_step=0)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
        assert sample.dtype == torch.float16
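# Hedged sketch (not part of the original test-suite): a minimal denoising loop
# showing the set_timesteps()/step() contract the tests above exercise, with a
# random tensor standing in for a real UNet's noise prediction.
def _dpm_singlestep_loop_sketch():
    sketch_scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
    sketch_scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 8, 8)
    for t in sketch_scheduler.timesteps:
        noise_pred = torch.randn_like(sample)  # stand-in for model(sample, t)
        sample = sketch_scheduler.step(noise_pred, t, sample).prev_sample
    return sample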
| 7 | 1 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
snake_case_ : Union[str, Any] = 2048
snake_case_ : str = 4096
snake_case_ : Union[str, Any] = 42
snake_case_ : List[str] = os.environ.pop("PROCESS_TRAIN", "false")
snake_case_ : Tuple = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def A (__A : List[Any] ) -> List[str]:
"""simple docstring"""
def choose_first(__A : Any , __A : str=False ):
assert isinstance(__A , __A )
if len(__A ) == 1:
UpperCAmelCase_ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
UpperCAmelCase_ = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
UpperCAmelCase_ = {'''id''': example['''id''']}
UpperCAmelCase_ = example['''annotations''']
UpperCAmelCase_ = annotation['''yes_no_answer''']
if 0 in yes_no_answer or 1 in yes_no_answer:
UpperCAmelCase_ = ['''yes'''] if 1 in yes_no_answer else ['''no''']
UpperCAmelCase_ = UpperCAmelCase_ = []
UpperCAmelCase_ = UpperCAmelCase_ = []
UpperCAmelCase_ = ['''<cls>''']
else:
UpperCAmelCase_ = ['''short''']
UpperCAmelCase_ = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
UpperCAmelCase_ = ['''long''']
UpperCAmelCase_ = choose_first(annotation['''long_answer'''] , is_long_answer=__A )
UpperCAmelCase_ = []
answer.update(__A )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
UpperCAmelCase_ = True
else:
UpperCAmelCase_ = False
UpperCAmelCase_ = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
if not all(isinstance(answer[k] , __A ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def A (__A : Optional[int] , __A : Any=False ) -> str:
"""simple docstring"""
UpperCAmelCase_ = _get_single_answer(__A )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase_ = example['''document''']['''tokens''']
UpperCAmelCase_ = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(__A ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
UpperCAmelCase_ = ['''start_token''', '''end_token''']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
UpperCAmelCase_ = example['''document''']['''tokens''']
UpperCAmelCase_ = answer['''start_token''']
UpperCAmelCase_ = answer['''end_token''']
UpperCAmelCase_ = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
        else:
            # HTML tokens are dropped from the context, so shift the
            # answer-span indices left past every skipped position
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
UpperCAmelCase_ = ''' '''.join(context[start_token:end_token] )
# checking above code
if assertion:
UpperCAmelCase_ = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
UpperCAmelCase_ = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
UpperCAmelCase_ = ''' '''.join([old[i] for i in range(len(__A ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , __A , end='''\n''' )
print('''Old:''' , __A , end='''\n\n''' )
return {
"context": " ".join(__A ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def A (__A : Optional[Any] , __A : Optional[int] , __A : Optional[int]=2048 , __A : Union[str, Any]=4096 , __A : List[Any]=True ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_context_and_ans(__A , assertion=__A )
UpperCAmelCase_ = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
UpperCAmelCase_ = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
UpperCAmelCase_ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = input_ids[:q_len]
UpperCAmelCase_ = range(__A , len(__A ) , max_length - doc_stride )
for i in doc_start_indices:
UpperCAmelCase_ = i + max_length - q_len
UpperCAmelCase_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(__A ),
"end_token": [-100] * len(__A ),
"category": category,
},
}
UpperCAmelCase_ = out['''context'''].split()
UpperCAmelCase_ = splitted_context[answer['''end_token''']]
UpperCAmelCase_ = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=__A , ).input_ids )
UpperCAmelCase_ = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=__A ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
UpperCAmelCase_ = len(tokenizer(__A , add_special_tokens=__A ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
UpperCAmelCase_ = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
UpperCAmelCase_ = answer['''start_token''']
UpperCAmelCase_ = answer['''end_token''']
if assertion:
UpperCAmelCase_ = tokenizer.decode(__A )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , __A , end='''\n\n''' )
if len(__A ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
UpperCAmelCase_ = input_ids[:q_len]
UpperCAmelCase_ = range(__A , len(__A ) , max_length - doc_stride )
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = [] # null, yes, no, long, short
for i in doc_start_indices:
UpperCAmelCase_ = i + max_length - q_len
UpperCAmelCase_ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            # the answer span lies fully inside this window; re-base the
            # indices onto the window (question tokens occupy the first q_len)
            UpperCAmelCase_ = start_token - i + q_len
            UpperCAmelCase_ = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
UpperCAmelCase_ = -100
UpperCAmelCase_ = -100
answers_category.append('''null''' )
UpperCAmelCase_ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(__A )
answers_end_token.append(__A )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(__A ) )
print('''Old:''' , tokenizer.decode(__A ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def A (__A : List[str] , __A : Tuple , __A : List[Any]=2048 , __A : Dict=4096 , __A : Union[str, Any]=False ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_strided_contexts_and_ans(
__A , __A , doc_stride=__A , max_length=__A , assertion=__A , )
return example
def A (__A : str , __A : Any ) -> Optional[Any]:
"""simple docstring"""
with jsonlines.open(__A , '''a''' ) as writer:
for example in tqdm(__A , total=len(__A ) , desc='''Saving samples ... ''' ):
UpperCAmelCase_ = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
snake_case_ : Optional[Any] = load_dataset("natural_questions")
snake_case_ : Optional[Any] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
snake_case_ : Dict = data["train" if PROCESS_TRAIN == "true" else "validation"]
snake_case_ : Dict = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
snake_case_ : Dict = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
snake_case_ : Any = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
snake_case_ : Tuple = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
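# Hedged sketch (not part of the original script): the strided-window indexing
# used above, on toy numbers. With question length q_len, windows over the
# flattened context start every (max_length - doc_stride) tokens, so adjacent
# windows overlap by doc_stride tokens and each holds max_length - q_len
# context tokens.
def _window_starts_sketch(q_len: int, total_len: int, max_length: int, doc_stride: int):
    return list(range(q_len, total_len, max_length - doc_stride))
# e.g. _window_starts_sketch(10, 100, 40, 20) -> [10, 30, 50, 70, 90]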
| 7 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
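# Hedged sketch (illustrative, not part of the original file): `_LazyModule`
# defers the heavy framework imports until an attribute is first accessed.
# A minimal stand-in uses the PEP 562 module-level __getattr__ hook; the
# mapping below is a hypothetical example, not the real import table.
import importlib
_SKETCH_ATTR_TO_MODULE = {"DeiTModel": ".modeling_deit"}
def _lazy_getattr_sketch(name, package=__name__):
    module = importlib.import_module(_SKETCH_ATTR_TO_MODULE[name], package)
    return getattr(module, name)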
| 7 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_snake_case , )
assert hasattr(self , '''env''')
def lowerCamelCase ( self : List[Any] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase_ = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_snake_case , instance_count=_snake_case , instance_type=self.instance_type , debugger_hook_config=_snake_case , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_snake_case , py_version='''py36''' , )
def lowerCamelCase ( self : Optional[int] , _snake_case : str):
"""simple docstring"""
TrainingJobAnalytics(_snake_case).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
@parameterized.expand([(2,)])
def lowerCamelCase ( self : Any , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.create_estimator(_snake_case)
# run training
estimator.fit()
# result dataframe
UpperCAmelCase_ = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
UpperCAmelCase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
UpperCAmelCase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase_ = (
Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''') as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _snake_case)
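# Hedged sketch (not from the original tests): the estimator pattern above,
# reduced to its essentials. The role ARN, source directory, and framework
# versions are placeholders, not values used by this test-suite.
def _make_estimator_sketch():
    from sagemaker.huggingface import HuggingFace  # requires the sagemaker SDK
    return HuggingFace(
        entry_point="run_glue.py",          # training script uploaded to the job
        source_dir="./scripts",             # hypothetical local directory
        role="arn:aws:iam::123456789012:role/sagemaker",  # placeholder role
        instance_count=2,
        instance_type="ml.p3.16xlarge",
        distribution={"smdistributed": {"dataparallel": {"enabled": True}}},
        hyperparameters={"model_name_or_path": "distilbert-base-cased"},
        transformers_version="4.6",         # placeholder versions
        pytorch_version="1.7",
        py_version="py36",
    )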
| 7 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ):
"""simple docstring"""
UpperCAmelCase_ = len(references[0])
if any(len(_snake_case) != references_per_prediction for refs in references):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)]
UpperCAmelCase_ = TER(
normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , )
UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 7 | 1 |
from ...configuration_utils import PretrainedConfig
snake_case_ : Optional[Any] = {
"google/tapas-base-finetuned-sqa": (
"https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
),
"google/tapas-base-finetuned-wtq": (
"https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
),
"google/tapas-base-finetuned-wikisql-supervised": (
"https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
),
"google/tapas-base-finetuned-tabfact": (
"https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
),
}
class __snake_case ( a ):
UpperCAmelCase__ : List[str] = '''tapas'''
def __init__( self : Any , _snake_case : Union[str, Any]=30522 , _snake_case : Dict=768 , _snake_case : List[Any]=12 , _snake_case : Union[str, Any]=12 , _snake_case : List[str]=3072 , _snake_case : Dict="gelu" , _snake_case : List[Any]=0.1 , _snake_case : int=0.1 , _snake_case : Optional[int]=1024 , _snake_case : Dict=[3, 256, 256, 2, 256, 256, 10] , _snake_case : Tuple=0.0_2 , _snake_case : List[str]=1e-12 , _snake_case : int=0 , _snake_case : Any=1_0.0 , _snake_case : Tuple=0 , _snake_case : Tuple=1.0 , _snake_case : List[Any]=None , _snake_case : List[str]=1.0 , _snake_case : List[str]=False , _snake_case : Optional[Any]=None , _snake_case : Optional[int]=1.0 , _snake_case : List[Any]=1.0 , _snake_case : List[str]=False , _snake_case : List[Any]=False , _snake_case : str="ratio" , _snake_case : Optional[Any]=None , _snake_case : List[str]=None , _snake_case : Union[str, Any]=64 , _snake_case : str=32 , _snake_case : str=False , _snake_case : Dict=True , _snake_case : int=False , _snake_case : Any=False , _snake_case : List[Any]=True , _snake_case : str=False , _snake_case : str=None , _snake_case : Tuple=None , **_snake_case : str , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_sizes
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
# Fine-tuning task hyperparameters
UpperCAmelCase_ = positive_label_weight
UpperCAmelCase_ = num_aggregation_labels
UpperCAmelCase_ = aggregation_loss_weight
UpperCAmelCase_ = use_answer_as_supervision
UpperCAmelCase_ = answer_loss_importance
UpperCAmelCase_ = use_normalized_answer_loss
UpperCAmelCase_ = huber_loss_delta
UpperCAmelCase_ = temperature
UpperCAmelCase_ = aggregation_temperature
UpperCAmelCase_ = use_gumbel_for_cells
UpperCAmelCase_ = use_gumbel_for_aggregation
UpperCAmelCase_ = average_approximation_function
UpperCAmelCase_ = cell_selection_preference
UpperCAmelCase_ = answer_loss_cutoff
UpperCAmelCase_ = max_num_rows
UpperCAmelCase_ = max_num_columns
UpperCAmelCase_ = average_logits_per_cell
UpperCAmelCase_ = select_one_column
UpperCAmelCase_ = allow_empty_column_selection
UpperCAmelCase_ = init_cell_selection_weights_to_zero
UpperCAmelCase_ = reset_position_index_per_cell
UpperCAmelCase_ = disable_per_token_loss
# Aggregation hyperparameters
UpperCAmelCase_ = aggregation_labels
UpperCAmelCase_ = no_aggregation_label_index
if isinstance(self.aggregation_labels , _snake_case):
UpperCAmelCase_ = {int(_snake_case): v for k, v in aggregation_labels.items()}
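# Hedged sketch (not part of the original file): the configuration above ships
# upstream as `transformers.TapasConfig`; a WTQ-style setup looks roughly like
# this. The exact values are assumptions for illustration, not a released recipe.
def _tapas_config_sketch():
    from transformers import TapasConfig
    return TapasConfig(
        num_aggregation_labels=4,
        use_answer_as_supervision=True,
        answer_loss_cutoff=0.664,
        cell_selection_preference=0.207,
    )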
| 7 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_tool('''text-to-speech''')
self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
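# Hedged sketch (outside the test class): the tool contract exercised above --
# load_tool() returns a callable whose result exposes the raw waveform tensor.
def _tts_tool_sketch():
    tool = load_tool("text-to-speech")
    tool.setup()
    audio = tool("hey").to_raw()  # 1-D float tensor; exact values depend on the seed
    return audio.shape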
| 7 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __snake_case ( a ):
    def __init__( self : Any , _snake_case : VQModel , _snake_case : UNet2DModel , _snake_case : DDIMScheduler):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=_snake_case , unet=_snake_case , scheduler=_snake_case)
@torch.no_grad()
def __call__( self : List[str] , _snake_case : int = 1 , _snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _snake_case : float = 0.0 , _snake_case : int = 50 , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , **_snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_snake_case , )
UpperCAmelCase_ = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
UpperCAmelCase_ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_snake_case)
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCAmelCase_ = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
UpperCAmelCase_ = {}
if accepts_eta:
UpperCAmelCase_ = eta
for t in self.progress_bar(self.scheduler.timesteps):
UpperCAmelCase_ = self.scheduler.scale_model_input(_snake_case , _snake_case)
# predict the noise residual
UpperCAmelCase_ = self.unet(_snake_case , _snake_case).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase_ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
# decode the image latents with the VAE
UpperCAmelCase_ = self.vqvae.decode(_snake_case).sample
UpperCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
UpperCAmelCase_ = self.numpy_to_pil(_snake_case)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case)
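# Hedged sketch (not part of the pipeline file): this unconditional
# latent-diffusion pipeline is published in diffusers as `LDMPipeline`; the
# checkpoint id below is an assumption for illustration.
def _ldm_pipeline_sketch():
    from diffusers import LDMPipeline
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
    return image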
| 7 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
snake_case_ : List[Any] = Mapping[str, np.ndarray]
snake_case_ : Any = Mapping[str, Any] # Is a nested dict.
snake_case_ : Dict = 0.01
@dataclasses.dataclass(frozen=a )
class __snake_case :
UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCAmelCase__ : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCAmelCase__ : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCAmelCase__ : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCAmelCase__ : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCAmelCase__ : Optional[str] = None
# Templates used to generate this protein (prediction-only)
UpperCAmelCase__ : Optional[Sequence[str]] = None
# Chain corresponding to each parent
UpperCAmelCase__ : Optional[Sequence[int]] = None
def A (__A : str ) -> Protein:
"""simple docstring"""
UpperCAmelCase_ = R'''(\[[A-Z]+\]\n)'''
UpperCAmelCase_ = [tag.strip() for tag in re.split(__A , __A ) if len(__A ) > 0]
UpperCAmelCase_ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
UpperCAmelCase_ = ["N", "CA", "C"]
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
for g in groups:
if "[PRIMARY]" == g[0]:
UpperCAmelCase_ = g[1][0].strip()
for i in range(len(__A ) ):
if seq[i] not in residue_constants.restypes:
UpperCAmelCase_ = '''X''' # FIXME: strings are immutable
UpperCAmelCase_ = np.array(
[residue_constants.restype_order.get(__A , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
UpperCAmelCase_ = []
for axis in range(3 ):
tertiary.append(list(map(__A , g[1][axis].split() ) ) )
UpperCAmelCase_ = np.array(__A )
            UpperCAmelCase_ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
for i, atom in enumerate(__A ):
UpperCAmelCase_ = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
UpperCAmelCase_ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
UpperCAmelCase_ = np.zeros(
(
len(__A ),
residue_constants.atom_type_num,
                ) ).astype(np.float32 )
for i, atom in enumerate(__A ):
UpperCAmelCase_ = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__A , atom_mask=__A , aatype=__A , residue_index=np.arange(len(__A ) ) , b_factors=__A , )
def A (__A : Protein , __A : int = 0 ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = prot.remark
if remark is not None:
pdb_headers.append(F"""REMARK {remark}""" )
UpperCAmelCase_ = prot.parents
UpperCAmelCase_ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
UpperCAmelCase_ = [p for i, p in zip(__A , __A ) if i == chain_id]
if parents is None or len(__A ) == 0:
UpperCAmelCase_ = ['''N/A''']
pdb_headers.append(F"""PARENT {" ".join(__A )}""" )
return pdb_headers
def A (__A : Protein , __A : str ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = pdb_str.split('''\n''' )
UpperCAmelCase_ = prot.remark
if remark is not None:
out_pdb_lines.append(F"""REMARK {remark}""" )
UpperCAmelCase_ = 42
if prot.parents is not None and len(prot.parents ) > 0:
UpperCAmelCase_ = []
if prot.parents_chain_index is not None:
UpperCAmelCase_ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__A ) , [] )
parent_dict[str(__A )].append(__A )
UpperCAmelCase_ = max([int(__A ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
UpperCAmelCase_ = parent_dict.get(str(__A ) , ['''N/A'''] )
parents_per_chain.append(__A )
else:
parents_per_chain.append(list(prot.parents ) )
else:
UpperCAmelCase_ = [['''N/A''']]
def make_parent_line(__A : Sequence[str] ) -> str:
return F"""PARENT {" ".join(__A )}"""
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
UpperCAmelCase_ = 0
for i, l in enumerate(__A ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__A )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__A ):
UpperCAmelCase_ = parents_per_chain[chain_counter]
else:
UpperCAmelCase_ = ['''N/A''']
out_pdb_lines.append(make_parent_line(__A ) )
return "\n".join(__A )
def A (__A : Protein ) -> str:
"""simple docstring"""
UpperCAmelCase_ = residue_constants.restypes + ['''X''']
def res_atoa(__A : int ) -> str:
        return residue_constants.restype_1to3.get(restypes[r] , '''UNK''' )
UpperCAmelCase_ = residue_constants.atom_types
UpperCAmelCase_ = []
UpperCAmelCase_ = prot.atom_mask
UpperCAmelCase_ = prot.aatype
UpperCAmelCase_ = prot.atom_positions
    UpperCAmelCase_ = prot.residue_index.astype(np.int32 )
UpperCAmelCase_ = prot.b_factors
UpperCAmelCase_ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
UpperCAmelCase_ = get_pdb_headers(__A )
if len(__A ) > 0:
pdb_lines.extend(__A )
UpperCAmelCase_ = aatype.shape[0]
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = string.ascii_uppercase
UpperCAmelCase_ = None
# Add all atom sites.
for i in range(__A ):
UpperCAmelCase_ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCAmelCase_ = '''ATOM'''
UpperCAmelCase_ = atom_name if len(__A ) == 4 else F""" {atom_name}"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = 1.00
UpperCAmelCase_ = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = '''A'''
if chain_index is not None:
UpperCAmelCase_ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCAmelCase_ = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__A )
atom_index += 1
UpperCAmelCase_ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCAmelCase_ = True
UpperCAmelCase_ = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCAmelCase_ = '''TER'''
UpperCAmelCase_ = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__A , __A ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(__A )
def A (__A : Protein ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def A (__A : FeatureDict , __A : ModelOutput , __A : Optional[np.ndarray] = None , __A : Optional[np.ndarray] = None , __A : Optional[str] = None , __A : Optional[Sequence[str]] = None , __A : Optional[Sequence[int]] = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=__A , remark=__A , parents=__A , parents_chain_index=__A , )
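# Hedged sketch (not part of the original module): building a minimal
# single-residue structure so the array shapes documented on the dataclass
# fields above are concrete. `Protein` is the name the call sites above use.
def _protein_sketch():
    num_res = 1
    return Protein(
        atom_positions=np.zeros((num_res, residue_constants.atom_type_num, 3)),
        atom_mask=np.zeros((num_res, residue_constants.atom_type_num)),
        aatype=np.zeros((num_res,), dtype=np.int32),
        residue_index=np.arange(1, num_res + 1),
        b_factors=np.zeros((num_res, residue_constants.atom_type_num)),
    )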
| 7 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
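# Hedged sketch (outside the tests): the inference pattern exercised above,
# with the checkpoint id and inputs taken from the slow test.
def _vqa_sketch():
    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    return vqa(
        image="./tests/fixtures/tests_samples/COCO/000000039769.png",
        question="How many cats are there?",
        top_k=2,
    )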
| 7 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __snake_case ( a ):
UpperCAmelCase__ : int = ['''image_processor''']
UpperCAmelCase__ : Tuple = '''SamImageProcessor'''
def __init__( self : Optional[Any] , _snake_case : Any):
"""simple docstring"""
super().__init__(_snake_case)
UpperCAmelCase_ = self.image_processor
UpperCAmelCase_ = -10
UpperCAmelCase_ = self.image_processor.size['''longest_edge''']
def __call__( self : Union[str, Any] , _snake_case : Any=None , _snake_case : Optional[Any]=None , _snake_case : Tuple=None , _snake_case : List[Any]=None , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : Optional[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = self.image_processor(
_snake_case , return_tensors=_snake_case , **_snake_case , )
# pop arguments that are not used in the foward but used nevertheless
UpperCAmelCase_ = encoding_image_processor['''original_sizes''']
if hasattr(_snake_case , '''numpy'''): # Checks if Torch or TF tensor
UpperCAmelCase_ = original_sizes.numpy()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self._check_and_preprocess_points(
input_points=_snake_case , input_labels=_snake_case , input_boxes=_snake_case , )
UpperCAmelCase_ = self._normalize_and_convert(
_snake_case , _snake_case , input_points=_snake_case , input_labels=_snake_case , input_boxes=_snake_case , return_tensors=_snake_case , )
return encoding_image_processor
def lowerCamelCase ( self : List[Any] , _snake_case : Dict , _snake_case : Tuple , _snake_case : List[str]=None , _snake_case : Optional[int]=None , _snake_case : Any=None , _snake_case : Optional[int]="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(_snake_case) != len(_snake_case):
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , _snake_case , original_sizes[0]) for point in input_points
]
else:
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , _snake_case , _snake_case)
for point, original_size in zip(_snake_case , _snake_case)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
UpperCAmelCase_ , UpperCAmelCase_ = self._pad_points_and_labels(_snake_case , _snake_case)
UpperCAmelCase_ = np.array(_snake_case)
if input_labels is not None:
UpperCAmelCase_ = np.array(_snake_case)
if input_boxes is not None:
if len(_snake_case) != len(_snake_case):
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , _snake_case , original_sizes[0] , is_bounding_box=_snake_case)
for box in input_boxes
]
else:
UpperCAmelCase_ = [
self._normalize_coordinates(self.target_size , _snake_case , _snake_case , is_bounding_box=_snake_case)
for box, original_size in zip(_snake_case , _snake_case)
]
UpperCAmelCase_ = np.array(_snake_case)
if input_boxes is not None:
if return_tensors == "pt":
UpperCAmelCase_ = torch.from_numpy(_snake_case)
# boxes batch size of 1 by default
UpperCAmelCase_ = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
UpperCAmelCase_ = tf.convert_to_tensor(_snake_case)
# boxes batch size of 1 by default
UpperCAmelCase_ = tf.expand_dims(_snake_case , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes})
if input_points is not None:
if return_tensors == "pt":
UpperCAmelCase_ = torch.from_numpy(_snake_case)
# point batch size of 1 by default
UpperCAmelCase_ = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
UpperCAmelCase_ = tf.convert_to_tensor(_snake_case)
# point batch size of 1 by default
UpperCAmelCase_ = tf.expand_dims(_snake_case , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points})
if input_labels is not None:
if return_tensors == "pt":
UpperCAmelCase_ = torch.from_numpy(_snake_case)
# point batch size of 1 by default
UpperCAmelCase_ = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
UpperCAmelCase_ = tf.convert_to_tensor(_snake_case)
# point batch size of 1 by default
UpperCAmelCase_ = tf.expand_dims(_snake_case , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels})
return encoding_image_processor
def lowerCamelCase ( self : List[Any] , _snake_case : Tuple , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = max([point.shape[0] for point in input_points])
UpperCAmelCase_ = []
for i, point in enumerate(_snake_case):
if point.shape[0] != expected_nb_points:
UpperCAmelCase_ = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
UpperCAmelCase_ = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(_snake_case)
UpperCAmelCase_ = processed_input_points
return input_points, input_labels
def lowerCamelCase ( self : Dict , _snake_case : int , _snake_case : np.ndarray , _snake_case : Any , _snake_case : Dict=False):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = original_size
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor._get_preprocess_shape(_snake_case , longest_edge=_snake_case)
UpperCAmelCase_ = deepcopy(_snake_case).astype(_snake_case)
if is_bounding_box:
UpperCAmelCase_ = coords.reshape(-1 , 2 , 2)
UpperCAmelCase_ = coords[..., 0] * (new_w / old_w)
UpperCAmelCase_ = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCAmelCase_ = coords.reshape(-1 , 4)
return coords
def lowerCamelCase ( self : int , _snake_case : Optional[Any]=None , _snake_case : int=None , _snake_case : str=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(_snake_case , '''numpy'''): # Checks for TF or Torch tensor
UpperCAmelCase_ = input_points.numpy().tolist()
if not isinstance(_snake_case , _snake_case) or not isinstance(input_points[0] , _snake_case):
raise ValueError('''Input points must be a list of list of floating points.''')
UpperCAmelCase_ = [np.array(_snake_case) for input_point in input_points]
else:
UpperCAmelCase_ = None
if input_labels is not None:
if hasattr(_snake_case , '''numpy'''):
UpperCAmelCase_ = input_labels.numpy().tolist()
if not isinstance(_snake_case , _snake_case) or not isinstance(input_labels[0] , _snake_case):
raise ValueError('''Input labels must be a list of list integers.''')
UpperCAmelCase_ = [np.array(_snake_case) for label in input_labels]
else:
UpperCAmelCase_ = None
if input_boxes is not None:
if hasattr(_snake_case , '''numpy'''):
UpperCAmelCase_ = input_boxes.numpy().tolist()
if (
not isinstance(_snake_case , _snake_case)
or not isinstance(input_boxes[0] , _snake_case)
or not isinstance(input_boxes[0][0] , _snake_case)
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''')
            UpperCAmelCase_ = [np.array(_snake_case).astype(np.float32) for box in input_boxes]
else:
UpperCAmelCase_ = None
return input_points, input_labels, input_boxes
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(_snake_case))
def lowerCamelCase ( self : Tuple , *_snake_case : Tuple , **_snake_case : int):
"""simple docstring"""
return self.image_processor.post_process_masks(*_snake_case , **_snake_case)
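# Hedged sketch (not part of the processor): the resize-then-rescale rule the
# coordinate normalisation above relies on -- the longest image side is scaled
# to `longest_edge`, and point coordinates follow the same per-axis ratio.
def _normalize_point_sketch(point_xy, old_hw, longest_edge=1024):
    old_h, old_w = old_hw
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    x, y = point_xy
    return (x * (new_w / old_w), y * (new_h / old_h))
# e.g. _normalize_point_sketch((500, 375), (750, 1000)) -> (512.0, 384.0)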
| 7 |
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """simple docstring"""
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """simple docstring"""
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark() -> None:
    """simple docstring"""
    def do_benchmark(number: int) -> None:
        setup = '''import __main__ as z'''
        print(F"""Benchmark when {number = }:""" )
        print(F"""{get_set_bits_count_using_modulo_operator(number) = }""" )
        timing = timeit(F"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=setup )
        print(F"""timeit() runs in {timing} seconds""" )
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""" )
        timing = timeit(
            F"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=setup , )
        print(F"""timeit() runs in {timing} seconds""" )
    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
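# Editor addition: a quick sanity sketch, not in the original file — both counting
# strategies should agree with bin(value).count("1") for non-negative inputs,
# since Brian Kernighan's trick clears one set bit per iteration while the
# modulo version inspects every bit position.
def _sanity_check_set_bits() -> None:
    for value in (0, 1, 25, 37, 58, 255):
        expected = bin(value).count('''1''')
        assert get_set_bits_count_using_brian_kernighans_algorithm(value) == expected
        assert get_set_bits_count_using_modulo_operator(value) == expected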
| 7 | 1 |
def nand_gate(input_a: int, input_b: int) -> int:
    """simple docstring"""
    return int((input_a, input_b).count(0 ) != 0 )
def test_nand_gate() -> None:
"""simple docstring"""
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
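# Editor addition: an illustrative sketch (hypothetical helper, not in the
# original) — NAND is functionally complete, so AND is a NAND followed by a
# self-NAND of the result (which acts as NOT).
def and_gate_from_nand(input_a: int, input_b: int) -> int:
    intermediate = nand_gate(input_a, input_b)
    return nand_gate(intermediate, intermediate)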
| 7 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
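# Editor addition: a hedged reference sketch of the behavior the three build_mask
# tests above assert — for these inputs, mapping every pad-valued position to 0
# and everything else to 1 reproduces the expected tensors. The real helper may
# differ in edge cases (e.g. pad values appearing mid-sequence).
def _build_mask_sketch(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # boolean comparison, then cast to the 0/1 integer mask the tests expect
    return (sequence != pad_token_id).long()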
| 7 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
snake_case_ : Dict = None
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
snake_case_ : Union[str, Any] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
snake_case_ : int = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
snake_case_ : Any = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __snake_case ( a ):
UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : Tuple = NllbTokenizer
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self : Dict , _snake_case : List[Any]=None , _snake_case : Union[str, Any]=None , _snake_case : List[Any]="<s>" , _snake_case : Optional[Any]="</s>" , _snake_case : Optional[int]="</s>" , _snake_case : Tuple="<s>" , _snake_case : Tuple="<unk>" , _snake_case : int="<pad>" , _snake_case : List[str]="<mask>" , _snake_case : Union[str, Any]=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=False , **_snake_case : Tuple , ):
"""simple docstring"""
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else mask_token
UpperCAmelCase_ = legacy_behaviour
super().__init__(
vocab_file=_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , unk_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , legacy_behaviour=_snake_case , **_snake_case , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = False if not self.vocab_file else True
UpperCAmelCase_ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens])
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens})
UpperCAmelCase_ = {
lang_code: self.convert_tokens_to_ids(_snake_case) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
UpperCAmelCase_ = src_lang if src_lang is not None else '''eng_Latn'''
UpperCAmelCase_ = self.convert_tokens_to_ids(self._src_lang)
UpperCAmelCase_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
@property
def lowerCamelCase ( self : int):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def lowerCamelCase ( self : Any , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def lowerCamelCase ( self : Dict , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCamelCase ( self : Tuple , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Optional[str] , _snake_case : Optional[str] , **_snake_case : Any):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''')
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case)
UpperCAmelCase_ = self.convert_tokens_to_ids(_snake_case)
UpperCAmelCase_ = tgt_lang_id
return inputs
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : str = "eng_Latn" , _snake_case : Optional[List[str]] = None , _snake_case : str = "fra_Latn" , **_snake_case : Optional[int] , ):
"""simple docstring"""
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = tgt_lang
return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def lowerCamelCase ( self : Optional[Any] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = self.convert_tokens_to_ids(_snake_case)
if self.legacy_behaviour:
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_ = [self.cur_lang_code]
UpperCAmelCase_ = [self.eos_token_id]
UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens)
UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens)
UpperCAmelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowerCamelCase ( self : List[str] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = self.convert_tokens_to_ids(_snake_case)
if self.legacy_behaviour:
UpperCAmelCase_ = []
UpperCAmelCase_ = [self.eos_token_id, self.cur_lang_code]
else:
UpperCAmelCase_ = [self.cur_lang_code]
UpperCAmelCase_ = [self.eos_token_id]
UpperCAmelCase_ = self.convert_ids_to_tokens(self.prefix_tokens)
UpperCAmelCase_ = self.convert_ids_to_tokens(self.suffix_tokens)
UpperCAmelCase_ = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens)) , )
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(_snake_case):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""")
return
UpperCAmelCase_ = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case):
copyfile(self.vocab_file , _snake_case)
return (out_vocab_file,)
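# Editor addition: a hedged usage sketch, not part of the module — it assumes the
# public facebook/nllb-200-distilled-600M checkpoint referenced in the constants
# above is reachable for download.
def _example_build_translation_batch():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        '''facebook/nllb-200-distilled-600M''' , src_lang='''eng_Latn''' , tgt_lang='''fra_Latn''')
    # In the default (non-legacy) mode the source language code is prepended and
    # </s> appended, mirroring set_src_lang_special_tokens above.
    return tokenizer('''Hello world''' , text_target='''Bonjour le monde''' , return_tensors='''pt''')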
| 7 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertEqual(_snake_case , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
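# Editor addition: outside this test file the tokenizer under test is the real
# M2M100Tokenizer; this hedged sketch of the language-code prefix/suffix behavior
# asserted above requires downloading the checkpoint.
def _example_mam_aaa_prefix():
    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''en''' , tgt_lang='''fr''')
    ids = tokenizer('''A test''').input_ids
    # encoded source text starts with the en language code and ends with </s>
    assert ids[0] == tokenizer.get_lang_id('''en''') and ids[-1] == tokenizer.eos_token_id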
| 7 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class __snake_case :
def __init__( self : Union[str, Any] , _snake_case : int , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = 13
UpperCAmelCase_ = 7
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = 2
UpperCAmelCase_ = 99
UpperCAmelCase_ = 0
UpperCAmelCase_ = 32
UpperCAmelCase_ = 2
UpperCAmelCase_ = 4
UpperCAmelCase_ = 0.1
UpperCAmelCase_ = 0.1
UpperCAmelCase_ = 512
UpperCAmelCase_ = 16
UpperCAmelCase_ = 2
UpperCAmelCase_ = 0.0_2
UpperCAmelCase_ = 3
UpperCAmelCase_ = 4
UpperCAmelCase_ = '''last'''
UpperCAmelCase_ = True
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa)
UpperCAmelCase_ = None
if self.use_input_lengths:
UpperCAmelCase_ = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Any , _snake_case : Dict , _snake_case : Any , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Dict , ):
"""simple docstring"""
UpperCAmelCase_ = TFFlaubertModel(config=_snake_case)
UpperCAmelCase_ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ = model(_snake_case)
UpperCAmelCase_ = [input_ids, input_mask]
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Tuple , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Dict , _snake_case : Any , _snake_case : Any , _snake_case : Any , _snake_case : List[str] , ):
"""simple docstring"""
UpperCAmelCase_ = TFFlaubertWithLMHeadModel(_snake_case)
UpperCAmelCase_ = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : List[str] , _snake_case : str , _snake_case : str , _snake_case : Tuple , _snake_case : List[str] , _snake_case : Dict , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : Tuple , _snake_case : Any , ):
"""simple docstring"""
UpperCAmelCase_ = TFFlaubertForQuestionAnsweringSimple(_snake_case)
UpperCAmelCase_ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase ( self : int , _snake_case : Tuple , _snake_case : Any , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : List[str] , _snake_case : str , _snake_case : str , _snake_case : List[Any] , _snake_case : Union[str, Any] , ):
"""simple docstring"""
UpperCAmelCase_ = TFFlaubertForSequenceClassification(_snake_case)
UpperCAmelCase_ = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def lowerCamelCase ( self : Optional[Any] , _snake_case : Dict , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : str , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Dict , _snake_case : List[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = TFFlaubertForTokenClassification(config=_snake_case)
UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase ( self : List[str] , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Tuple , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Any , _snake_case : Optional[int] , _snake_case : Tuple , ):
"""simple docstring"""
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = TFFlaubertForMultipleChoice(config=_snake_case)
UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1))
UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1))
UpperCAmelCase_ = tf.tile(tf.expand_dims(_snake_case , 1) , (1, self.num_choices, 1))
UpperCAmelCase_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
UpperCAmelCase_ = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''langs''': token_type_ids,
'''lengths''': input_lengths,
}
return config, inputs_dict
@require_tf
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Any = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Optional[int] = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Any = False
UpperCAmelCase__ : Dict = False
def lowerCamelCase ( self : Tuple , _snake_case : List[str] , _snake_case : str , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = TFFlaubertModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , emb_dim=37)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_snake_case)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*_snake_case)
@slow
def lowerCamelCase ( self : Any):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = TFFlaubertModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
@require_tf
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''')
UpperCAmelCase_ = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
UpperCAmelCase_ = model(_snake_case)[0]
UpperCAmelCase_ = tf.TensorShape((1, 8, 512))
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice.
UpperCAmelCase_ = tf.convert_to_tensor(
[
[
[-1.8_7_6_8_7_7_3, -1.5_6_6_5_5_5, 0.2_7_0_7_2_4_1_8],
[-1.6_9_2_0_0_3_8, -0.5_8_7_3_5_0_5, 1.9_3_2_9_5_9_9],
[-2.9_5_6_3_9_8_5, -1.6_9_9_3_8_3_5, 1.7_9_7_2_0_5_2],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 7 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
import sys
def matrix_chain_order(array: list) -> tuple:
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution: list , i: int , j: int ) -> None:
    """simple docstring"""
    if i == j:
        print('''A''' + str(i ) , end=''' ''' )
    else:
        print('''(''' , end=''' ''' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(''')''' , end=''' ''' )
def main() -> None:
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , optimal_solution = matrix_chain_order(array )
    print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(optimal_solution , 1 , n - 1 )
if __name__ == "__main__":
    main()
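# Editor addition: a worked check — for the classic CLRS dimension array used in
# main(), the minimum cost is known to be 15125 scalar multiplications, with the
# optimal parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).
def _check_clrs_example() -> None:
    matrix, _ = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert matrix[1][6] == 15125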
| 7 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class __snake_case ( a , a ):
UpperCAmelCase__ : List[str] = '''swin'''
UpperCAmelCase__ : Any = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Tuple , _snake_case : Union[str, Any]=224 , _snake_case : Optional[int]=4 , _snake_case : List[str]=3 , _snake_case : Dict=96 , _snake_case : int=[2, 2, 6, 2] , _snake_case : List[Any]=[3, 6, 12, 24] , _snake_case : Dict=7 , _snake_case : List[str]=4.0 , _snake_case : List[Any]=True , _snake_case : Any=0.0 , _snake_case : int=0.0 , _snake_case : Tuple=0.1 , _snake_case : str="gelu" , _snake_case : Tuple=False , _snake_case : Tuple=0.0_2 , _snake_case : Optional[int]=1e-5 , _snake_case : List[Any]=32 , _snake_case : int=None , _snake_case : Tuple=None , **_snake_case : str , ):
"""simple docstring"""
super().__init__(**_snake_case)
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = len(_snake_case)
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCAmelCase_ = int(embed_dim * 2 ** (len(_snake_case) - 1))
UpperCAmelCase_ = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_snake_case) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names)
class __snake_case ( a ):
UpperCAmelCase__ : Union[str, Any] = version.parse('''1.11''' )
@property
def lowerCamelCase ( self : Any):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
return 1e-4
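# Editor addition: a hedged instantiation sketch — the defaults mirror the tiny
# Swin variant, and hidden_size is derived in __init__ above as
# embed_dim * 2 ** (num_layers - 1), i.e. 96 * 8 = 768 for the defaults.
def _example_swin_config():
    from transformers import SwinConfig

    config = SwinConfig()
    assert config.hidden_size == config.embed_dim * 2 ** (len(config.depths) - 1)
    return config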
| 7 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
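            # Editor note: i.e. a call spread over several lines, such as
            #     getattr(
            #         self.config, "my_attribute", None
            #     )
            # which the plain substring checks above cannot see.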
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
| 7 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : Optional[Any] = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["ConvNextFeatureExtractor"]
snake_case_ : List[Any] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
snake_case_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 7 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0)
UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 7 | 1 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case ( a ):
UpperCAmelCase__ : int = (KDPMaDiscreteScheduler,)
UpperCAmelCase__ : List[str] = 1_0
def lowerCamelCase ( self : str , **_snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**_snake_case)
return config
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2]):
self.check_over_configs(beta_start=_snake_case , beta_end=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(prediction_type='''v_prediction''')
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps)
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase_ = sample.to(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = scheduler.scale_model_input(_snake_case , _snake_case)
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = output.prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(_snake_case))
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2) < 1e-3
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
if torch_device == "mps":
return
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps)
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCAmelCase_ = sample.to(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = scheduler.scale_model_input(_snake_case , _snake_case)
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = output.prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(_snake_case))
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3
def lowerCamelCase ( self : Any):
"""simple docstring"""
if torch_device == "mps":
return
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(self.num_inference_steps , device=_snake_case)
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.to(_snake_case) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCAmelCase_ = scheduler.scale_model_input(_snake_case , _snake_case)
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case)
UpperCAmelCase_ = output.prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(_snake_case))
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
if str(_snake_case).startswith('''cpu'''):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6) < 1e-3
| 7 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''test-config''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
UpperCAmelCase_ = CustomConfig(attribute=42)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case)
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCAmelCase_ = c.n_embd + 1 # int
UpperCAmelCase_ = c.resid_pdrop + 1.0 # float
UpperCAmelCase_ = not c.scale_attn_weights # bool
UpperCAmelCase_ = c.summary_type + '''foo''' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''')
self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = PretrainedConfig()
UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)]
if len(_snake_case) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F""" {", ".join(_snake_case)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
with self.assertRaises(_snake_case):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = mock.Mock()
UpperCAmelCase_ = 500
UpperCAmelCase_ = {}
UpperCAmelCase_ = HTTPError
UpperCAmelCase_ = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head:
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''')
UpperCAmelCase_ = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_snake_case)
UpperCAmelCase_ = 2
json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w'''))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCAmelCase_ = ['''config.42.0.0.json''']
UpperCAmelCase_ = 768
configuration.save_pretrained(_snake_case)
shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json'''))
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 768)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
UpperCAmelCase_ = '''v4.0.0'''
UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained(
_snake_case , return_unused_kwargs=_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_snake_case , {})
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
UpperCAmelCase_ = '''v3.0.0'''
UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case)
self.assertEqual(old_configuration.hidden_size , 768)
| 7 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
snake_case_ : List[Any] = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def A (__A : str , __A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
inspect_dataset(__A , __A )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def A (__A : int , __A : Dict ) -> Dict:
"""simple docstring"""
inspect_metric(__A , __A )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(__A )
assert "__pycache__" not in os.listdir(__A )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def A (__A : Any , __A : Optional[Any] , __A : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_config_info(__A , config_name=__A )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def A (__A : int , __A : Union[str, Any] , __A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
with pytest.raises(__A ):
get_dataset_config_info(__A , config_name=__A )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def A (__A : int , __A : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_config_names(__A )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def A (__A : List[str] , __A : Any , __A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_infos(__A )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def A (__A : Any , __A : Optional[Any] , __A : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = get_dataset_infos(__A )
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def A (__A : Union[str, Any] , __A : Tuple , __A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
with pytest.raises(__A ):
get_dataset_split_names(__A , config_name=__A )
| 7 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __snake_case :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class __snake_case :
def __init__( self : Optional[int] , _snake_case : Iterable[int]):
"""simple docstring"""
UpperCAmelCase_ = None
for i in sorted(_snake_case , reverse=_snake_case):
UpperCAmelCase_ = Node(_snake_case , self.head)
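        # Iterating the input in descending order and inserting each node at the
        # head leaves the linked list sorted ascending.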
def __iter__( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.head
while node:
yield node.data
UpperCAmelCase_ = node.next_node
def __len__( self : int):
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self : Optional[Any]):
"""simple docstring"""
return " -> ".join([str(_snake_case) for node in self])
def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(__A ) + list(__A ) )
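# Example with the module-level test data above: merging the two lists yields
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10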
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 7 | 1 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
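# Worked example: for 13 (0b1101) both functions return 3. Kernighan's loop
# clears one set bit per iteration (13 & 12 = 12, 12 & 11 = 8, 8 & 7 = 0,
# i.e. three iterations), while the modulo variant shifts once per bit
# position (four iterations for 13).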
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__A )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
UpperCAmelCase_ = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__A , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 7 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = question_encoder
UpperCAmelCase_ = generator
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
if os.path.isfile(_snake_case):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_snake_case , exist_ok=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''')
UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''')
self.question_encoder.save_pretrained(_snake_case)
self.generator.save_pretrained(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case)
if config is None:
UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=_snake_case , generator=_snake_case)
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]):
"""simple docstring"""
return self.current_tokenizer(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return self.generator.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any):
"""simple docstring"""
return self.generator.decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.generator
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ):
"""simple docstring"""
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
UpperCAmelCase_ = labels['''input_ids''']
return model_inputs
| 7 | 1 |
import argparse
import json
import subprocess
def A (__A : List[str] , __A : Dict ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
UpperCAmelCase_ = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
UpperCAmelCase_ = subprocess.run(__A , shell=__A , stdout=subprocess.PIPE )
UpperCAmelCase_ = output.stdout.decode('''utf-8''' )
UpperCAmelCase_ = json.loads(__A )
UpperCAmelCase_ = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(__A )
    # save the results so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(__A ) )
if len(__A ) > 0:
UpperCAmelCase_ = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def A (__A : Optional[int] ) -> List[Any]:
"""simple docstring"""
return values.split(''',''' )
snake_case_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
snake_case_ : Union[str, Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 7 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
| 7 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __snake_case :
def __init__( self : Dict , _snake_case : Any , _snake_case : List[Any]=13 , _snake_case : List[str]=2 , _snake_case : Union[str, Any]=24 , _snake_case : Tuple=16 , _snake_case : Optional[int]=True , _snake_case : Dict=True , _snake_case : int=32 , _snake_case : Optional[int]=5 , _snake_case : str=4 , _snake_case : Optional[int]=37 , _snake_case : Any="gelu" , _snake_case : Optional[Any]=0.1 , _snake_case : str=0.1 , _snake_case : List[Any]=10 , _snake_case : Union[str, Any]=0.0_2 , _snake_case : Any=None , _snake_case : Tuple=2 , _snake_case : int=2 , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = max_length
UpperCAmelCase_ = num_mel_bins
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = frequency_stride
UpperCAmelCase_ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
UpperCAmelCase_ = (self.max_length - self.patch_size) // self.time_stride + 1
UpperCAmelCase_ = frequency_out_dimension * time_out_dimension
UpperCAmelCase_ = num_patches + 2
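        # Worked example, assuming the defaults map as in the original AST tester
        # (patch_size=2, max_length=24, num_mel_bins=16, both strides 2):
        #   frequency_out_dimension = (16 - 2) // 2 + 1 = 8
        #   time_out_dimension = (24 - 2) // 2 + 1 = 12
        #   num_patches = 8 * 12 = 96, so seq_length = 96 + 2 = 98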
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = self.get_config()
return config, input_values, labels
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowerCamelCase ( self : Dict , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = ASTModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''input_values''': input_values}
return config, inputs_dict
@require_torch
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : str = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : List[Any] = False
def lowerCamelCase ( self : List[str] , _snake_case : int , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : List[Any] , _snake_case : Union[str, Any]):
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = ASTModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''AST does not use inputs_embeds''')
def lowerCamelCase ( self : int):
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear))
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''input_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
@slow
def lowerCamelCase ( self : Dict):
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = ASTModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
def A () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' )
UpperCAmelCase_ , UpperCAmelCase_ = torchaudio.load(__A )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''')
if is_torchaudio_available()
else None
)
@slow
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.default_feature_extractor
UpperCAmelCase_ = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(_snake_case)
UpperCAmelCase_ = self.default_feature_extractor
UpperCAmelCase_ , UpperCAmelCase_ = prepare_audio()
UpperCAmelCase_ = audio.squeeze().numpy()
UpperCAmelCase_ = feature_extractor(_snake_case , sampling_rate=_snake_case , return_tensors='''pt''').to(_snake_case)
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = torch.Size((1, 527))
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2]).to(_snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
| 7 |
from maths.prime_factors import prime_factors
def A (__A : int ) -> int:
"""simple docstring"""
if not isinstance(__A , __A ):
UpperCAmelCase_ = F"""Input value of [number={number}] must be an integer"""
raise TypeError(__A )
if number < 1:
raise ValueError('''Input must be a positive integer''' )
return -1 if len(prime_factors(__A ) ) % 2 else 1
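# Worked example (assuming `prime_factors` returns factors with multiplicity):
# 12 = 2 * 2 * 3 has three factors, so the result is -1; 10 = 2 * 5 has two,
# so the result is 1. This is the Liouville function lambda(n).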
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
from __future__ import annotations
import requests
snake_case_ : Any = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def A (__A : str , __A : int = 1 , __A : str = "new" , __A : list | None = None ) -> dict:
"""simple docstring"""
UpperCAmelCase_ = wanted_data or []
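    # `set(wanted_data) - valid_terms` is non-empty only when unknown fields were
    # requested; the walrus assignment keeps the joined names for the error message.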
if invalid_search_terms := ", ".join(sorted(set(__A ) - valid_terms ) ):
UpperCAmelCase_ = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__A )
UpperCAmelCase_ = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={'''User-agent''': '''A random string'''} , )
if response.status_code == 429:
raise requests.HTTPError
UpperCAmelCase_ = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__A )}
UpperCAmelCase_ = {}
for id_ in range(__A ):
UpperCAmelCase_ = {
item: data['''data''']['''children'''][id_]['''data'''][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 7 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Dict = logging.get_logger(__name__)
def A (__A : Optional[int] , __A : Dict=False , __A : str=False ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = '''backbone.''' if is_semantic else ''''''
UpperCAmelCase_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""{prefix}blocks.{i}.norm1.weight""", F"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm1.bias""", F"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.weight""", F"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""{prefix}blocks.{i}.attn.proj.bias""", F"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.weight""", F"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.norm2.bias""", F"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.weight""", F"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc1.bias""", F"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.weight""", F"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""{prefix}blocks.{i}.mlp.fc2.bias""", F"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
(F"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def A (__A : Dict , __A : Union[str, Any] , __A : List[str]=False , __A : Optional[int]=False ) -> int:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
UpperCAmelCase_ = '''backbone.''' if is_semantic else ''''''
# queries, keys and values
UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.attn.q_bias""" )
UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.attn.v_bias""" )
UpperCAmelCase_ = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ = q_bias
UpperCAmelCase_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ = v_bias
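        # The fused `qkv.weight` popped above stacks the three projections row-wise:
        # with hidden_size H, rows [0, H) are the query, [H, 2H) the key and
        # [2H, 3H) the value. BEiT stores biases only for query and value.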
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.gamma_1""" )
UpperCAmelCase_ = state_dict.pop(F"""{prefix}blocks.{i}.gamma_2""" )
UpperCAmelCase_ = gamma_a
UpperCAmelCase_ = gamma_a
def A (__A : Optional[Any] , __A : List[str] , __A : Any ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ = dct.pop(__A )
UpperCAmelCase_ = val
def A () -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def A (__A : str , __A : List[str] , __A : Dict=False ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False if '''rvlcdip''' in checkpoint_url else True
UpperCAmelCase_ = BeitConfig(use_absolute_position_embeddings=__A , use_mask_token=__A )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
UpperCAmelCase_ = 1024
UpperCAmelCase_ = 4096
UpperCAmelCase_ = 24
UpperCAmelCase_ = 16
# labels
if "rvlcdip" in checkpoint_url:
UpperCAmelCase_ = 16
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''rvlcdip-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__A ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' )['''model''']
UpperCAmelCase_ = create_rename_keys(__A , has_lm_head=__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , __A , has_lm_head=__A )
# load HuggingFace model
UpperCAmelCase_ = BeitForMaskedImageModeling(__A ) if has_lm_head else BeitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# Check outputs on an image
UpperCAmelCase_ = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__A )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=__A , return_tensors='''pt''' )
UpperCAmelCase_ = encoding['''pixel_values''']
UpperCAmelCase_ = model(__A )
UpperCAmelCase_ = outputs.logits
# verify logits
UpperCAmelCase_ = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(__A ), "Shape of logits not as expected"
Path(__A ).mkdir(exist_ok=__A )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if push_to_hub:
if has_lm_head:
UpperCAmelCase_ = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
else:
UpperCAmelCase_ = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
image_processor.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=__A , )
model.push_to_hub(
repo_path_or_name=Path(__A , __A ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=__A , )
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
snake_case_ : List[Any] = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
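# A hedged usage sketch (not part of the original script): driving the
# converter directly from Python instead of the CLI. The checkpoint URL is the
# argparse default above; the dump folder "/tmp/dit-base" is a hypothetical
# path chosen only for illustration.
#
# convert_dit_checkpoint(
#     "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
#     "/tmp/dit-base",
#     False,  # push_to_hub
# )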
| 7 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf (__A : BertModel , __A : str , __A : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
UpperCAmelCase_ = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__A ):
os.makedirs(__A )
UpperCAmelCase_ = model.state_dict()
def to_tf_var_name(__A : str ):
for patt, repl in iter(__A ):
UpperCAmelCase_ = name.replace(__A , __A )
return F"""bert/{name}"""
def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ):
UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__A )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ = to_tf_var_name(__A )
UpperCAmelCase_ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ = torch_tensor.T
UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A )
tf.keras.backend.set_value(__A , __A )
UpperCAmelCase_ = session.run(__A )
print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" )
UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() )
saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def main (__A : Any=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase_ = parser.parse_args(__A )
UpperCAmelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
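# A hedged usage sketch (not part of the original script): main() accepts an
# argv-style list, so the conversion can be invoked programmatically. The
# model name is just an example and both paths below are hypothetical.
#
# main([
#     "--model_name", "bert-base-uncased",
#     "--pytorch_model_path", "/path/to/pytorch_model.bin",
#     "--tf_cache_dir", "/tmp/tf_bert",
# ])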
| 7 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
snake_case_ : int = False
@skip_mps
class __snake_case ( a , a , a , unittest.TestCase ):
UpperCAmelCase__ : Any = StableDiffusionAttendAndExcitePipeline
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Tuple = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
UpperCAmelCase__ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ : Any = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def lowerCamelCase ( cls : Optional[int]):
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_snake_case , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase ( self : Dict , _snake_case : Optional[int] , _snake_case : List[Any]=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = UpperCAmelCase_ = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = self.pipeline_class(**_snake_case)
pipe.to(_snake_case)
pipe.set_progress_bar_config(disable=_snake_case)
UpperCAmelCase_ = self.get_dummy_inputs(_snake_case)
UpperCAmelCase_ = pipe(**_snake_case).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3))
UpperCAmelCase_ = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6])
UpperCAmelCase_ = np.abs(image_slice.flatten() - expected_slice).max()
self.assertLessEqual(_snake_case , 1e-3)
def lowerCamelCase ( self : str):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2])
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
def lowerCamelCase ( self : int):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5e-4)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Any):
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(_snake_case)
@classmethod
def lowerCamelCase ( cls : Tuple):
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = torch.manual_seed(51)
UpperCAmelCase_ = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , safety_checker=_snake_case , torch_dtype=torch.floataa)
pipe.to('''cuda''')
UpperCAmelCase_ = '''a painting of an elephant with glasses'''
UpperCAmelCase_ = [5, 7]
UpperCAmelCase_ = pipe(
prompt=_snake_case , token_indices=_snake_case , guidance_scale=7.5 , generator=_snake_case , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''')
assert np.abs((expected_image - image).max()) < 5e-1
| 7 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]):
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(_snake_case) , expected_num_stages + 1)
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = model_class(_snake_case)
@jax.jit
def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]):
return model(pixel_values=_snake_case , **_snake_case)
with self.subTest('''JIT Enabled'''):
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case))
for jitted_output, output in zip(_snake_case , _snake_case):
self.assertEqual(jitted_output.shape , output.shape)
def prepare_img () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''')
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
| 7 | 1 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in the conversion script (despite not being used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def check_attribute_being_used (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def check_config_attributes_being_used (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def check_config_attributes () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
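# A hedged usage note (not part of the original script): per the comment at
# the top of the file, the check is meant to be run from the repository root,
# e.g. `python utils/check_config_attributes.py`; it raises a ValueError
# listing every configuration class whose attributes are never read by the
# matching modeling files.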
| 7 |
import comet # From: unbabel-comet
import torch
import datasets
snake_case_ : Tuple = datasets.logging.get_logger(__name__)
snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Any):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence'''),
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
if self.config_name == "default":
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
else:
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name))
def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False):
"""simple docstring"""
if gpus is None:
UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())]
UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case)
return {"mean_score": mean_score, "scores": scores}
| 7 | 1 |
snake_case_ : Dict = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm (__A : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
UpperCAmelCase_ = Stack()
UpperCAmelCase_ = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__A ) )
elif i in operators:
# RULE 2
operator_stack.push(__A )
elif i == ")":
# RULE 4
UpperCAmelCase_ = operator_stack.peek()
operator_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operand_stack.peek()
operand_stack.pop()
UpperCAmelCase_ = operators[opr](__A , __A )
operand_stack.push(__A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
snake_case_ : Optional[int] = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 7 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( a ):
UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,)
UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),)
def lowerCamelCase ( self : Dict , **_snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_snake_case)
return config
def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_snake_case , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]):
"""simple docstring"""
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
return sample
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
# make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3
def lowerCamelCase ( self : int):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
UpperCAmelCase_ = self.full_loop(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
assert not torch.isnan(_snake_case).any(), "Samples have nan numbers"
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lower_order_final=_snake_case)
self.check_over_configs(lower_order_final=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def lowerCamelCase ( self : int):
"""simple docstring"""
self.check_over_configs(variance_type=_snake_case)
self.check_over_configs(variance_type='''learned_range''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_snake_case , time_step=0)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
assert sample.dtype == torch.floataa
| 7 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines (__A : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase_ = []
for line in lines:
UpperCAmelCase_ = re.sub(R'''#.*''' , '''''' , __A ) # remove comments
if line:
filtered_lines.append(__A )
UpperCAmelCase_ = '''\n'''.join(__A )
# Make a hash from all this code
UpperCAmelCase_ = full_str.encode('''utf-8''' )
return shaaaa(__A ).hexdigest()
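# A hedged usage sketch (not part of the original file): the helper strips
# "#" comments, drops empty lines, joins what remains with "\n", and returns
# the hex digest of that string, so a call like
#
# _hash_python_lines(["import os  # stdlib", "", "print(os.sep)"])
#
# would hash roughly "import os  \nprint(os.sep)".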
# get importable module names and hash for caching
snake_case_ : Dict = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
snake_case_ : Any = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
snake_case_ : Tuple = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
snake_case_ : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 7 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples (__A : List[str] , __A : int , __A : Optional[Any] , __A : int=1024 ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = [], []
UpperCAmelCase_ = list(zip(__A , __A ) )
UpperCAmelCase_ , UpperCAmelCase_ = sorted_examples[0]
def is_too_big(__A : Optional[Any] ):
return tok(__A , return_tensors='''pt''' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
UpperCAmelCase_ = new_src + ''' ''' + src
UpperCAmelCase_ = new_tgt + ''' ''' + tgt
if is_too_big(__A ) or is_too_big(__A ): # cant fit, finalize example
finished_src.append(__A )
finished_tgt.append(__A )
UpperCAmelCase_ , UpperCAmelCase_ = src, tgt
else: # can fit, keep adding
UpperCAmelCase_ , UpperCAmelCase_ = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(__A )
finished_tgt.append(__A )
return finished_src, finished_tgt
def pack_data_dir (__A : Optional[Any] , __A : Path , __A : str , __A : List[str] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = Path(__A )
save_path.mkdir(exist_ok=__A )
for split in ["train"]:
UpperCAmelCase_ , UpperCAmelCase_ = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
UpperCAmelCase_ = [x.rstrip() for x in Path(__A ).open().readlines()]
UpperCAmelCase_ = [x.rstrip() for x in Path(__A ).open().readlines()]
UpperCAmelCase_ , UpperCAmelCase_ = pack_examples(__A , __A , __A , __A )
print(F"""packed {split} split from {len(__A )} examples -> {len(__A )}.""" )
Path(save_path / F"""{split}.source""" ).open('''w''' ).write('''\n'''.join(__A ) )
Path(save_path / F"""{split}.target""" ).open('''w''' ).write('''\n'''.join(__A ) )
for split in ["val", "test"]:
UpperCAmelCase_ , UpperCAmelCase_ = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(__A , save_path / F"""{split}.source""" )
shutil.copyfile(__A , save_path / F"""{split}.target""" )
def packer_cli () -> Any:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--tok_name''' , type=__A , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''--max_seq_len''' , type=__A , default=128 )
parser.add_argument('''--data_dir''' , type=__A )
parser.add_argument('''--save_path''' , type=__A )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(__A , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
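# A hedged usage sketch (not part of the original script): the data directory
# is expected to hold {train,val,test}.source / .target files, matching the
# loops above. The script filename and the concrete paths here are
# hypothetical; the tokenizer name comes from the script's own help text.
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed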
| 7 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ):
"""simple docstring"""
UpperCAmelCase_ = len(references[0])
if any(len(_snake_case) != references_per_prediction for refs in references):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)]
UpperCAmelCase_ = TER(
normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , )
UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 7 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
snake_case_ : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig ( datasets.BuilderConfig ):
UpperCAmelCase__ : Optional[datasets.Features] = None
UpperCAmelCase__ : str = "utf-8"
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : bool = True # deprecated
UpperCAmelCase__ : Optional[int] = None # deprecated
UpperCAmelCase__ : int = 1_0 << 2_0 # 10MB
UpperCAmelCase__ : Optional[bool] = None
class __snake_case ( datasets.ArrowBasedBuilder ):
UpperCAmelCase__ : Optional[int] = JsonConfig
def lowerCamelCase ( self : Any):
"""simple docstring"""
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''')
UpperCAmelCase_ = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''')
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''')
return datasets.DatasetInfo(features=self.config.features)
def lowerCamelCase ( self : Dict , _snake_case : Optional[int]):
"""simple docstring"""
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""")
UpperCAmelCase_ = dl_manager.download_and_extract(self.config.data_files)
if isinstance(_snake_case , (str, list, tuple)):
UpperCAmelCase_ = data_files
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = [files]
UpperCAmelCase_ = [dl_manager.iter_files(_snake_case) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
UpperCAmelCase_ = []
for split_name, files in data_files.items():
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = [files]
UpperCAmelCase_ = [dl_manager.iter_files(_snake_case) for file in files]
splits.append(datasets.SplitGenerator(name=_snake_case , gen_kwargs={'''files''': files}))
return splits
def lowerCamelCase ( self : str , _snake_case : pa.Table):
"""simple docstring"""
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features) - set(pa_table.column_names):
UpperCAmelCase_ = self.config.features.arrow_schema.field(_snake_case).type
UpperCAmelCase_ = pa_table.append_column(_snake_case , pa.array([None] * len(_snake_case) , type=_snake_case))
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
UpperCAmelCase_ = table_cast(_snake_case , self.config.features.arrow_schema)
return pa_table
def lowerCamelCase ( self : Optional[Any] , _snake_case : str):
"""simple docstring"""
for file_idx, file in enumerate(itertools.chain.from_iterable(_snake_case)):
# If the file is a single JSON object and we only need to look at the list of items in one specific field
if self.config.field is not None:
with open(_snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
UpperCAmelCase_ = json.load(_snake_case)
# We keep only the field we are interested in
UpperCAmelCase_ = dataset[self.config.field]
# We accept two formats: a list of dicts or a dict of lists
if isinstance(_snake_case , (list, tuple)):
UpperCAmelCase_ = set().union(*[row.keys() for row in dataset])
UpperCAmelCase_ = {col: [row.get(_snake_case) for row in dataset] for col in keys}
else:
UpperCAmelCase_ = dataset
UpperCAmelCase_ = pa.Table.from_pydict(_snake_case)
yield file_idx, self._cast_table(_snake_case)
# If the file has one json object per line
else:
with open(_snake_case , '''rb''') as f:
UpperCAmelCase_ = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
UpperCAmelCase_ = max(self.config.chunksize // 32 , 16 << 10)
UpperCAmelCase_ = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
UpperCAmelCase_ = f.read(self.config.chunksize)
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_snake_case)
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
UpperCAmelCase_ = batch.decode(self.config.encoding , errors=_snake_case).encode('''utf-8''')
try:
while True:
try:
UpperCAmelCase_ = paj.read_json(
io.BytesIO(_snake_case) , read_options=paj.ReadOptions(block_size=_snake_case))
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_snake_case , pa.ArrowInvalid)
and "straddling" not in str(_snake_case)
or block_size > len(_snake_case)
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_snake_case)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""")
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors) as f:
UpperCAmelCase_ = json.load(_snake_case)
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_snake_case)}: {e}""")
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_snake_case , _snake_case): # list is the only sequence type supported in JSON
try:
UpperCAmelCase_ = set().union(*[row.keys() for row in dataset])
UpperCAmelCase_ = {col: [row.get(_snake_case) for row in dataset] for col in keys}
UpperCAmelCase_ = pa.Table.from_pydict(_snake_case)
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_snake_case)}: {e}""")
raise ValueError(F"""Not able to read records in the JSON file at {file}.""") from None
yield file_idx, self._cast_table(_snake_case)
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_snake_case)}: {e}""")
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys()))}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_snake_case)
batch_idx += 1
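# Minimal standalone sketch of the chunked parsing strategy above: pyarrow
# parses a fixed-size block and, when a record straddles a block boundary,
# the block size is doubled and the read retried (payload and sizes below
# are made up for illustration).
_demo_payload = b'{"a": 1}\n{"a": 2}\n'
_demo_block_size = 8
while True:
    try:
        _demo_table = paj.read_json(
            io.BytesIO(_demo_payload), read_options=paj.ReadOptions(block_size=_demo_block_size))
        break
    except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
        _demo_block_size *= 2  # same doubling as in the loop above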
| 7 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_tool('''text-to-speech''')
self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
| 7 | 1 |
def A (__A : dict ) -> set:
"""simple docstring"""
UpperCAmelCase_ = set()
# edges = set of the graph's edges
UpperCAmelCase_ = get_edges(__A )
# While there are still edges left, take an arbitrary edge
# (from_node, to_node), add both of its endpoints to chosen_vertices, and
# then remove every edge incident to from_node or to_node
while edges:
UpperCAmelCase_ , UpperCAmelCase_ = edges.pop()
chosen_vertices.add(__A )
chosen_vertices.add(__A )
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(__A )
return chosen_vertices
def A (__A : dict ) -> set:
"""simple docstring"""
UpperCAmelCase_ = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node) )
return edges
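# Illustrative restatement of the matching-based 2-approximation above with
# explicit names (a hypothetical helper mirroring the two functions named A):
def _demo_matching_min_vertex_cover(graph: dict) -> set:
    edges = {(u, v) for u, neighbours in graph.items() for v in neighbours}
    cover = set()
    while edges:
        u, v = edges.pop()
        cover.update((u, v))
        edges = {e for e in edges if u not in e and v not in e}
    return cover

# _demo_matching_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]})
# returns a vertex cover at most twice the size of an optimal one.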
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 7 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
snake_case_ : Optional[int] = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class __snake_case ( a ):
UpperCAmelCase__ : str = '''facebook/nllb-200-distilled-600M'''
UpperCAmelCase__ : Optional[int] = (
'''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
'''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
'''which should be the desired output language. Both `src_lang` and `tgt_lang` are written in '''
'''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
)
UpperCAmelCase__ : Tuple = '''translator'''
UpperCAmelCase__ : Dict = AutoTokenizer
UpperCAmelCase__ : List[str] = AutoModelForSeqaSeqLM
UpperCAmelCase__ : str = LANGUAGE_CODES
UpperCAmelCase__ : Any = ['''text''', '''text''', '''text''']
UpperCAmelCase__ : Optional[int] = ['''text''']
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Tuple , _snake_case : Any , _snake_case : Any):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(F"""{src_lang} is not a supported language.""")
if tgt_lang not in self.lang_to_code:
raise ValueError(F"""{tgt_lang} is not a supported language.""")
UpperCAmelCase_ = self.lang_to_code[src_lang]
UpperCAmelCase_ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_snake_case , return_tensors='''pt''' , src_lang=_snake_case , tgt_lang=_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : List[Any]):
"""simple docstring"""
return self.model.generate(**_snake_case)
def lowerCamelCase ( self : Dict , _snake_case : Tuple):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_snake_case)
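# Minimal sketch of the lookup the encode step above performs, assuming the
# mapping is bound to LANGUAGE_CODES as the class body expects: plain-English
# names resolve to NLLB-200 codes, and unknown names raise ValueError.
for _lang in ("English", "French", "Korean"):
    assert _lang in LANGUAGE_CODES
print(LANGUAGE_CODES["English"], "->", LANGUAGE_CODES["French"])  # eng_Latn -> fra_Latn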
| 7 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
| 7 | 1 |
def A (__A : int | float | str ) -> tuple[int, int]:
"""simple docstring"""
try:
UpperCAmelCase_ = float(__A )
except ValueError:
raise ValueError('''Please enter a valid number''' )
UpperCAmelCase_ = decimal - int(__A )
if fractional_part == 0:
return int(__A ), 1
else:
UpperCAmelCase_ = len(str(__A ).split('''.''' )[1] )
UpperCAmelCase_ = int(decimal * (10**number_of_frac_digits) )
UpperCAmelCase_ = 10**number_of_frac_digits
UpperCAmelCase_ , UpperCAmelCase_ = denominator, numerator
while True:
UpperCAmelCase_ = dividend % divisor
if remainder == 0:
break
UpperCAmelCase_ , UpperCAmelCase_ = divisor, remainder
UpperCAmelCase_ , UpperCAmelCase_ = numerator / divisor, denominator / divisor
return int(__A ), int(__A )
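# Worked sketch of the Euclidean reduction above with explicit names:
# 6.25 -> 625/100; repeated remainders leave gcd(625, 100) = 25, so the
# reduced fraction is 25/4, matching decimal_to_fraction(6.25) == (25, 4).
_num, _den = 625, 100
_dividend, _divisor = _den, _num
while _dividend % _divisor:
    _dividend, _divisor = _divisor, _dividend % _divisor
print(_num // _divisor, _den // _divisor)  # 25 4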
if __name__ == "__main__":
print(f"{decimal_to_fraction(2) = }")
print(f"{decimal_to_fraction(89.0) = }")
print(f"{decimal_to_fraction('67') = }")
print(f"{decimal_to_fraction('45.0') = }")
print(f"{decimal_to_fraction(1.5) = }")
print(f"{decimal_to_fraction('6.25') = }")
print(f"{decimal_to_fraction('78td') = }")
| 7 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
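# Worked sketch of the Kernighan step above: each `n &= n - 1` clears exactly
# one set bit, so the iteration count equals the population count.
_n, _steps = 0b10110, 0
while _n:
    _n &= _n - 1
    _steps += 1
assert _steps == bin(0b10110).count("1") == 3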
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__A )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
UpperCAmelCase_ = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__A , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 7 | 1 |
def A (__A : int = 600851475143 ) -> int:
"""simple docstring"""
try:
UpperCAmelCase_ = int(__A )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
UpperCAmelCase_ = 2
UpperCAmelCase_ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
UpperCAmelCase_ = i
while n % i == 0:
UpperCAmelCase_ = n // i
i += 1
return int(__A )
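# Quick sanity sketch of the trial-division loop above with explicit names:
# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29.
def _demo_largest_prime_factor(n: int) -> int:
    i, largest = 2, 1
    while n > 1:
        while n % i == 0:
            largest, n = i, n // i
        i += 1
    return largest

assert _demo_largest_prime_factor(13195) == 29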
if __name__ == "__main__":
print(f"{solution() = }")
| 7 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
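# Standalone sketch of the mask semantics asserted above: positions equal to
# the padding value get 0 and all others 1 (the real build_mask may handle
# interior pads differently; only the cases tested above are mirrored here).
_sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
print((_sequence != 1).long().tolist())  # [1, 1, 1, 1, 0, 0, 0]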
| 7 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Any = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertEqual(_snake_case , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
# fmt: on
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
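# Standalone sketch of the right shift the batch test above relies on:
# decoder inputs are the labels shifted one position to the right with the
# decoder start token (EOS, id 2, here) prepended. This mirrors, but is not,
# the imported shift_tokens_right.
import torch
_labels = torch.tensor([[FR_CODE, 250, 583, 2]])
_shifted = torch.empty_like(_labels)
_shifted[:, 1:] = _labels[:, :-1]
_shifted[:, 0] = 2
print(_shifted.tolist())  # [[2, 128028, 250, 583]]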
| 7 | 1 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
snake_case_ : List[Any] = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
snake_case_ : Dict = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
snake_case_ : List[Any] = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
snake_case_ : List[Any] = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
snake_case_ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
snake_case_ : Tuple = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def A (__A : List[Any] ) -> Any:
"""simple docstring"""
if isinstance(__A , __A ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError('''boolean value expected''' )
def A (__A : Any , __A : Union[str, Any] , __A : Optional[int] , __A : Tuple , __A : Tuple=False ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.in_layers.2.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.emb_layers.1.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.0.bias"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def A (__A : Optional[int] , __A : Optional[int] , __A : Union[str, Any] , __A : Dict , __A : str=None ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = checkpoint[F"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.weight"""]
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.norm.bias"""]
UpperCAmelCase_ = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCAmelCase_ = (
checkpoint[F"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
UpperCAmelCase_ = checkpoint[F"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
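# Standalone sketch of the conversion above: a fused 1x1-conv qkv weight of
# shape (3*C, C, 1, 1) is chunked into q, k, v and the trailing singleton
# conv dimensions squeezed away, leaving (C, C) linear weights.
_C = 4
_qkv_weight = torch.randn(3 * _C, _C, 1, 1)
_w_q, _w_k, _w_v = _qkv_weight.chunk(3, dim=0)
assert _w_q.squeeze(-1).squeeze(-1).shape == (_C, _C)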
def A (__A : str , __A : int ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' )
UpperCAmelCase_ = {}
UpperCAmelCase_ = checkpoint['''time_embed.0.weight''']
UpperCAmelCase_ = checkpoint['''time_embed.0.bias''']
UpperCAmelCase_ = checkpoint['''time_embed.2.weight''']
UpperCAmelCase_ = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
UpperCAmelCase_ = checkpoint['''label_emb.weight''']
UpperCAmelCase_ = checkpoint['''input_blocks.0.0.weight''']
UpperCAmelCase_ = checkpoint['''input_blocks.0.0.bias''']
UpperCAmelCase_ = unet_config['''down_block_types''']
UpperCAmelCase_ = unet_config['''layers_per_block''']
UpperCAmelCase_ = unet_config['''attention_head_dim''']
UpperCAmelCase_ = unet_config['''block_out_channels''']
UpperCAmelCase_ = 1
UpperCAmelCase_ = channels_list[0]
for i, layer_type in enumerate(__A ):
UpperCAmelCase_ = channels_list[i]
UpperCAmelCase_ = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__A ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__A ):
UpperCAmelCase_ = F"""down_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = True if j == 0 and downsample_block_has_skip else False
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A )
UpperCAmelCase_ = F"""down_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
__A , __A , __A , __A , __A )
current_layer += 1
if i != len(__A ) - 1:
UpperCAmelCase_ = F"""down_blocks.{i}.downsamplers.0"""
UpperCAmelCase_ = F"""input_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A )
current_layer += 1
UpperCAmelCase_ = current_channels
# hardcoded the mid-block for now
UpperCAmelCase_ = '''mid_block.resnets.0'''
UpperCAmelCase_ = '''middle_block.0'''
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A )
UpperCAmelCase_ = '''mid_block.attentions.0'''
UpperCAmelCase_ = '''middle_block.1'''
UpperCAmelCase_ = convert_attention(__A , __A , __A , __A , __A )
UpperCAmelCase_ = '''mid_block.resnets.1'''
UpperCAmelCase_ = '''middle_block.2'''
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A )
UpperCAmelCase_ = 0
UpperCAmelCase_ = unet_config['''up_block_types''']
for i, layer_type in enumerate(__A ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A )
current_layer += 1
if i != len(__A ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.1"""
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCAmelCase_ = F"""up_blocks.{i}.resnets.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.0"""
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A , has_skip=__A )
UpperCAmelCase_ = F"""up_blocks.{i}.attentions.{j}"""
UpperCAmelCase_ = F"""output_blocks.{current_layer}.1"""
UpperCAmelCase_ = convert_attention(
__A , __A , __A , __A , __A )
current_layer += 1
if i != len(__A ) - 1:
UpperCAmelCase_ = F"""up_blocks.{i}.upsamplers.0"""
UpperCAmelCase_ = F"""output_blocks.{current_layer-1}.2"""
UpperCAmelCase_ = convert_resnet(__A , __A , __A , __A )
UpperCAmelCase_ = checkpoint['''out.0.weight''']
UpperCAmelCase_ = checkpoint['''out.0.bias''']
UpperCAmelCase_ = checkpoint['''out.2.weight''']
UpperCAmelCase_ = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
snake_case_ : Any = parser.parse_args()
snake_case_ : Optional[int] = strabool(args.class_cond)
snake_case_ : Optional[Any] = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
snake_case_ : Tuple = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
snake_case_ : List[str] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
snake_case_ : str = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
snake_case_ : List[str] = None
snake_case_ : Optional[Any] = con_pt_to_diffuser(args.unet_path, unet_config)
snake_case_ : Dict = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
snake_case_ : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
snake_case_ : Tuple = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
snake_case_ : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
snake_case_ : List[Any] = CMStochasticIterativeScheduler(**scheduler_config)
snake_case_ : Any = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 7 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7 | 1 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A (*__A : List[str] , __A : Optional[Union[Dict, Any]] = None , __A : str=True , __A : str=2 ) -> str:
"""simple docstring"""
from .. import __version__
UpperCAmelCase_ = take_from
UpperCAmelCase_ = ()
if not isinstance(args[0] , __A ):
UpperCAmelCase_ = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__A ).base_version ) >= version.parse(__A ):
raise ValueError(
F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
F""" version {__version__} is >= {version_name}""" )
UpperCAmelCase_ = None
if isinstance(__A , __A ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(__A ),)
UpperCAmelCase_ = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(__A , __A ):
values += (getattr(__A , __A ),)
UpperCAmelCase_ = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
UpperCAmelCase_ = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
UpperCAmelCase_ = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , __A , stacklevel=__A )
if isinstance(__A , __A ) and len(__A ) > 0:
UpperCAmelCase_ = inspect.getouterframes(inspect.currentframe() )[1]
UpperCAmelCase_ = call_frame.filename
UpperCAmelCase_ = call_frame.lineno
UpperCAmelCase_ = call_frame.function
UpperCAmelCase_ , UpperCAmelCase_ = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(__A ) == 0:
return
elif len(__A ) == 1:
return values[0]
return values
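# A minimal usage sketch (hedged; this helper is diffusers' `deprecate`,
# renamed `A` in this dump). With `take_from`, the deprecated value is
# popped from the kwargs dict and returned:
#
#   kwargs = {"scale": 0.5}
#   scale = A("scale", "1.0.0", "Use `rescale` instead.", take_from=kwargs)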
| 7 |
import sys
def A (__A : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
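    # The first table will hold the minimum number of scalar
    # multiplications needed for the chain product of matrices a..b; the
    # second records the split index achieving it, for the printer below.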
for chain_length in range(2 , __A ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__A , __A ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if i == j:
print('''A''' + str(__A ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
        print_optimal_solution(__A , __A , optimal_solution[i][j] )
        print_optimal_solution(__A , optimal_solution[i][j] + 1 , __A )
print(''')''' , end=''' ''' )
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )
print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__A , 1 , n - 1 )
if __name__ == "__main__":
main()
| 7 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
snake_case_ : Optional[Any] = logging.get_logger(__name__)
class __snake_case ( enum.Enum ):
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : Optional[int] = 1
@add_end_docstrings(a )
class __snake_case ( a ):
UpperCAmelCase__ : Dict = '''generated'''
def __init__( self : Optional[int] , *_snake_case : Optional[Any] , **_snake_case : Tuple):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
def lowerCamelCase ( self : Optional[int] , _snake_case : Optional[int]=None , _snake_case : Optional[int]=None , _snake_case : Tuple=None , _snake_case : Tuple=None , _snake_case : Any=None , _snake_case : Optional[int]=None , **_snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = {}
if truncation is not None:
UpperCAmelCase_ = truncation
UpperCAmelCase_ = generate_kwargs
UpperCAmelCase_ = {}
if return_tensors is not None and return_type is None:
UpperCAmelCase_ = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
UpperCAmelCase_ = return_type
if clean_up_tokenization_spaces is not None:
UpperCAmelCase_ = clean_up_tokenization_spaces
if stop_sequence is not None:
UpperCAmelCase_ = self.tokenizer.encode(_snake_case , add_special_tokens=_snake_case)
if len(_snake_case) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''')
UpperCAmelCase_ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowerCamelCase ( self : List[str] , _snake_case : int , _snake_case : int , _snake_case : int):
"""simple docstring"""
return True
def lowerCamelCase ( self : Union[str, Any] , *_snake_case : Union[str, Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _snake_case):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''')
UpperCAmelCase_ = ([prefix + arg for arg in args[0]],)
UpperCAmelCase_ = True
elif isinstance(args[0] , _snake_case):
UpperCAmelCase_ = (prefix + args[0],)
UpperCAmelCase_ = False
else:
            raise ValueError(
                F""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`""")
UpperCAmelCase_ = self.tokenizer(*_snake_case , padding=_snake_case , truncation=_snake_case , return_tensors=self.framework)
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[Any] , *_snake_case : str , **_snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = super().__call__(*_snake_case , **_snake_case)
if (
isinstance(args[0] , _snake_case)
and all(isinstance(_snake_case , _snake_case) for el in args[0])
and all(len(_snake_case) == 1 for res in result)
):
return [res[0] for res in result]
return result
def lowerCamelCase ( self : Tuple , _snake_case : Any , _snake_case : Dict=TruncationStrategy.DO_NOT_TRUNCATE , **_snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self._parse_and_tokenize(_snake_case , truncation=_snake_case , **_snake_case)
return inputs
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , **_snake_case : List[str]):
"""simple docstring"""
if self.framework == "pt":
UpperCAmelCase_ , UpperCAmelCase_ = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
UpperCAmelCase_ , UpperCAmelCase_ = tf.shape(model_inputs['''input_ids''']).numpy()
UpperCAmelCase_ = generate_kwargs.get('''min_length''' , self.model.config.min_length)
UpperCAmelCase_ = generate_kwargs.get('''max_length''' , self.model.config.max_length)
self.check_inputs(_snake_case , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''])
UpperCAmelCase_ = self.model.generate(**_snake_case , **_snake_case)
UpperCAmelCase_ = output_ids.shape[0]
if self.framework == "pt":
UpperCAmelCase_ = output_ids.reshape(_snake_case , out_b // in_b , *output_ids.shape[1:])
elif self.framework == "tf":
UpperCAmelCase_ = tf.reshape(_snake_case , (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Any=ReturnType.TEXT , _snake_case : Dict=False):
"""simple docstring"""
UpperCAmelCase_ = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
UpperCAmelCase_ = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
UpperCAmelCase_ = {
F"""{self.return_name}_text""": self.tokenizer.decode(
_snake_case , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case , )
}
records.append(_snake_case)
return records
@add_end_docstrings(a )
class __snake_case ( a ):
UpperCAmelCase__ : Union[str, Any] = '''summary'''
def __call__( self : Dict , *_snake_case : Dict , **_snake_case : Dict):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : int , _snake_case : int):
"""simple docstring"""
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""")
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""")
@add_end_docstrings(a )
class __snake_case ( a ):
UpperCAmelCase__ : Optional[Any] = '''translation'''
def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : int , _snake_case : int):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''')
return True
def lowerCamelCase ( self : Any , *_snake_case : Any , _snake_case : Optional[Any]=TruncationStrategy.DO_NOT_TRUNCATE , _snake_case : int=None , _snake_case : str=None):
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _snake_case):
return self.tokenizer._build_translation_inputs(
*_snake_case , return_tensors=self.framework , truncation=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case)
else:
return super()._parse_and_tokenize(*_snake_case , truncation=_snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]=None , _snake_case : Dict=None , **_snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = super()._sanitize_parameters(**_snake_case)
if src_lang is not None:
UpperCAmelCase_ = src_lang
if tgt_lang is not None:
UpperCAmelCase_ = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
UpperCAmelCase_ = kwargs.get('''task''' , self.task)
UpperCAmelCase_ = task.split('''_''')
if task and len(_snake_case) == 4:
# translation, XX, to YY
UpperCAmelCase_ = items[1]
UpperCAmelCase_ = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : Optional[Any]):
"""simple docstring"""
return super().__call__(*_snake_case , **_snake_case)
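# A minimal usage sketch (hedged; task identifiers are the standard
# pipeline names and the default checkpoints apply):
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization")
#   summarizer("Long article text ...", max_length=60, min_length=10)
#   translator = pipeline("translation_en_to_fr")
#   translator("How old are you?")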
| 7 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
    # used during training (even though we don't have a training script for these models yet)
    "Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` is used during training (even though we don't have a training script for these models yet)
    # `norm` is used in the conversion script (despite not being used in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
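# A hedged mini-example of what counts as "used" above: for an attribute
# `hidden_size`, any of `config.hidden_size`, `getattr(config,
# "hidden_size", ...)`, or a multi-line `getattr(` matched by the regex in
# a modeling_*.py source qualifies, as do the summary_* attributes when
# `SequenceSummary` appears in that source.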
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
| 7 | 1 |
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
snake_case_ : Optional[Any] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
snake_case_ : Tuple = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
snake_case_ : str = BeautifulSoup(res.text, "html.parser")
snake_case_ : Union[str, Any] = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(f"https://google.com{link.get('href')}")
| 7 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0)
UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 7 | 1 |
from collections import deque
class __snake_case :
def __init__( self : Tuple , _snake_case : str , _snake_case : int , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = process_name # process name
UpperCAmelCase_ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCAmelCase_ = arrival_time
UpperCAmelCase_ = burst_time # remaining burst time
UpperCAmelCase_ = 0 # total time of the process wait in ready queue
UpperCAmelCase_ = 0 # time from arrival time to completion time
class __snake_case :
def __init__( self : Tuple , _snake_case : int , _snake_case : list[int] , _snake_case : deque[Process] , _snake_case : int , ):
"""simple docstring"""
UpperCAmelCase_ = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCAmelCase_ = time_slices
# unfinished process is in this ready_queue
UpperCAmelCase_ = queue
# current time
UpperCAmelCase_ = current_time
# finished process is in this sequence queue
UpperCAmelCase_ = deque()
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(self.finish_queue)):
sequence.append(self.finish_queue[i].process_name)
return sequence
def lowerCamelCase ( self : Optional[Any] , _snake_case : list[Process]):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(_snake_case)):
waiting_times.append(queue[i].waiting_time)
return waiting_times
def lowerCamelCase ( self : Dict , _snake_case : list[Process]):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(_snake_case)):
turnaround_times.append(queue[i].turnaround_time)
return turnaround_times
def lowerCamelCase ( self : Optional[Any] , _snake_case : list[Process]):
"""simple docstring"""
UpperCAmelCase_ = []
for i in range(len(_snake_case)):
completion_times.append(queue[i].stop_time)
return completion_times
def lowerCamelCase ( self : Dict , _snake_case : deque[Process]):
"""simple docstring"""
return [q.burst_time for q in queue]
def lowerCamelCase ( self : Tuple , _snake_case : Process):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowerCamelCase ( self : List[str] , _snake_case : deque[Process]):
"""simple docstring"""
UpperCAmelCase_ = deque() # sequence deque of finished process
while len(_snake_case) != 0:
UpperCAmelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of current process
self.update_waiting_time(_snake_case)
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCAmelCase_ = 0
# set the process's turnaround time because it is finished
UpperCAmelCase_ = self.current_time - cp.arrival_time
# set the completion time
UpperCAmelCase_ = self.current_time
# add the process to queue that has finished queue
finished.append(_snake_case)
self.finish_queue.extend(_snake_case) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowerCamelCase ( self : Dict , _snake_case : deque[Process] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(_snake_case)):
UpperCAmelCase_ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(_snake_case)
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCAmelCase_ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(_snake_case)
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCAmelCase_ = 0
# set the finish time
UpperCAmelCase_ = self.current_time
# update the process' turnaround time because it is finished
UpperCAmelCase_ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(_snake_case)
self.finish_queue.extend(_snake_case) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
for i in range(self.number_of_queues - 1):
UpperCAmelCase_ , UpperCAmelCase_ = self.round_robin(
self.ready_queue , self.time_slices[i])
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue)
return self.finish_queue
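# Policy recap (derived from the methods above): each of the first
# number_of_queues - 1 levels gets one round-robin pass with its own time
# slice; whatever remains unfinished falls through to a final
# first-come-first-served level.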
if __name__ == "__main__":
import doctest
snake_case_ : Union[str, Any] = Process("P1", 0, 53)
snake_case_ : List[Any] = Process("P2", 0, 17)
snake_case_ : Tuple = Process("P3", 0, 68)
snake_case_ : Optional[Any] = Process("P4", 0, 24)
snake_case_ : Dict = 3
snake_case_ : Optional[Any] = [17, 25]
snake_case_ : List[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
snake_case_ : int = Process("P1", 0, 53)
snake_case_ : Tuple = Process("P2", 0, 17)
snake_case_ : Union[str, Any] = Process("P3", 0, 68)
snake_case_ : Optional[Any] = Process("P4", 0, 24)
snake_case_ : str = 3
snake_case_ : str = [17, 25]
snake_case_ : List[str] = deque([Pa, Pa, Pa, Pa])
snake_case_ : int = MLFQ(number_of_queues, time_slices, queue, 0)
snake_case_ : Optional[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
)
| 7 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''test-config''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
UpperCAmelCase_ = CustomConfig(attribute=42)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        UpperCAmelCase_ = GPT2Config()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCAmelCase_ = c.n_embd + 1 # int
UpperCAmelCase_ = c.resid_pdrop + 1.0 # float
UpperCAmelCase_ = not c.scale_attn_weights # bool
UpperCAmelCase_ = c.summary_type + '''foo''' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''')
self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = PretrainedConfig()
UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)]
if len(_snake_case) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F""" {", ".join(_snake_case)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
with self.assertRaises(_snake_case):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = mock.Mock()
UpperCAmelCase_ = 500
UpperCAmelCase_ = {}
UpperCAmelCase_ = HTTPError
UpperCAmelCase_ = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head:
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''')
UpperCAmelCase_ = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_snake_case)
UpperCAmelCase_ = 2
json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w'''))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCAmelCase_ = ['''config.42.0.0.json''']
UpperCAmelCase_ = 768
configuration.save_pretrained(_snake_case)
shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json'''))
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 768)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
UpperCAmelCase_ = '''v4.0.0'''
UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained(
_snake_case , return_unused_kwargs=_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_snake_case , {})
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
UpperCAmelCase_ = '''v3.0.0'''
UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case)
self.assertEqual(old_configuration.hidden_size , 768)
| 7 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
snake_case_ : List[str] = logging.get_logger(__name__)
def A (__A : List[str] ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(__A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__A ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
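# The helper above (transformers' make_batched, named `A` in this dump)
# normalizes every accepted video input (a single frame, a list of frames,
# or a batch of frame lists) into List[List[ImageInput]].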
class __snake_case ( a ):
UpperCAmelCase__ : int = ['''pixel_values''']
def __init__( self : List[Any] , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : bool = True , _snake_case : Dict[str, int] = None , _snake_case : bool = True , _snake_case : Union[int, float] = 1 / 255 , _snake_case : bool = True , _snake_case : bool = True , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , **_snake_case : Any , ):
"""simple docstring"""
super().__init__(**_snake_case)
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase_ = get_size_dict(_snake_case , default_to_square=_snake_case)
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ = get_size_dict(_snake_case , param_name='''crop_size''')
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = offset
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase ( self : str , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : PILImageResampling = PILImageResampling.BILINEAR , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = get_size_dict(_snake_case , default_to_square=_snake_case)
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(_snake_case , size['''shortest_edge'''] , default_to_square=_snake_case)
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size['''height'''], size['''width'''])
else:
raise ValueError(F"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
return resize(_snake_case , size=_snake_case , resample=_snake_case , data_format=_snake_case , **_snake_case)
def lowerCamelCase ( self : Dict , _snake_case : np.ndarray , _snake_case : Dict[str, int] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Dict , ):
"""simple docstring"""
UpperCAmelCase_ = get_size_dict(_snake_case)
if "height" not in size or "width" not in size:
raise ValueError(F"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case)
def lowerCamelCase ( self : Any , _snake_case : np.ndarray , _snake_case : Union[int, float] , _snake_case : bool = True , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
        UpperCAmelCase_ = image.astype(np.float32)
if offset:
UpperCAmelCase_ = image - (scale / 2)
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case)
def lowerCamelCase ( self : Dict , _snake_case : np.ndarray , _snake_case : Union[float, List[float]] , _snake_case : Union[float, List[float]] , _snake_case : Optional[Union[str, ChannelDimension]] = None , **_snake_case : Union[str, Any] , ):
"""simple docstring"""
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case)
def lowerCamelCase ( self : Optional[Any] , _snake_case : ImageInput , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = None , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : bool = None , _snake_case : float = None , _snake_case : bool = None , _snake_case : bool = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
"""simple docstring"""
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''')
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(_snake_case)
if do_resize:
UpperCAmelCase_ = self.resize(image=_snake_case , size=_snake_case , resample=_snake_case)
if do_center_crop:
UpperCAmelCase_ = self.center_crop(_snake_case , size=_snake_case)
if do_rescale:
UpperCAmelCase_ = self.rescale(image=_snake_case , scale=_snake_case , offset=_snake_case)
if do_normalize:
UpperCAmelCase_ = self.normalize(image=_snake_case , mean=_snake_case , std=_snake_case)
UpperCAmelCase_ = to_channel_dimension_format(_snake_case , _snake_case)
return image
def lowerCamelCase ( self : Any , _snake_case : ImageInput , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : PILImageResampling = None , _snake_case : bool = None , _snake_case : Dict[str, int] = None , _snake_case : bool = None , _snake_case : float = None , _snake_case : bool = None , _snake_case : bool = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[float, List[float]]] = None , _snake_case : Optional[Union[str, TensorType]] = None , _snake_case : ChannelDimension = ChannelDimension.FIRST , **_snake_case : Tuple , ):
"""simple docstring"""
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = offset if offset is not None else self.offset
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_snake_case , default_to_square=_snake_case)
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(_snake_case , param_name='''crop_size''')
if not valid_images(_snake_case):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
UpperCAmelCase_ = make_batched(_snake_case)
UpperCAmelCase_ = [
[
self._preprocess_image(
image=_snake_case , do_resize=_snake_case , size=_snake_case , resample=_snake_case , do_center_crop=_snake_case , crop_size=_snake_case , do_rescale=_snake_case , rescale_factor=_snake_case , offset=_snake_case , do_normalize=_snake_case , image_mean=_snake_case , image_std=_snake_case , data_format=_snake_case , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {'''pixel_values''': videos}
return BatchFeature(data=_snake_case , tensor_type=_snake_case)
| 7 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __snake_case :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class __snake_case :
def __init__( self : Optional[int] , _snake_case : Iterable[int]):
"""simple docstring"""
UpperCAmelCase_ = None
for i in sorted(_snake_case , reverse=_snake_case):
UpperCAmelCase_ = Node(_snake_case , self.head)
def __iter__( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.head
while node:
yield node.data
UpperCAmelCase_ = node.next_node
def __len__( self : int):
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self : Optional[Any]):
"""simple docstring"""
return " -> ".join([str(_snake_case) for node in self])
def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(__A ) + list(__A ) )
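# A hedged worked example: merging the two module-level test tuples yields
# a single ascending list with duplicates kept, i.e.
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10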
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 7 | 1 |
from __future__ import annotations
import time
snake_case_ : Union[str, Any] = list[tuple[int, int]]
snake_case_ : Dict = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case_ : Optional[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __snake_case :
def __init__( self : List[str] , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : Node | None):
"""simple docstring"""
UpperCAmelCase_ = pos_x
UpperCAmelCase_ = pos_y
UpperCAmelCase_ = (pos_y, pos_x)
UpperCAmelCase_ = goal_x
UpperCAmelCase_ = goal_y
UpperCAmelCase_ = parent
class __snake_case :
def __init__( self : List[Any] , _snake_case : tuple[int, int] , _snake_case : tuple[int, int]):
"""simple docstring"""
UpperCAmelCase_ = Node(start[1] , start[0] , goal[1] , goal[0] , _snake_case)
UpperCAmelCase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , _snake_case)
UpperCAmelCase_ = [self.start]
UpperCAmelCase_ = False
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
while self.node_queue:
UpperCAmelCase_ = self.node_queue.pop(0)
if current_node.pos == self.target.pos:
UpperCAmelCase_ = True
return self.retrace_path(_snake_case)
UpperCAmelCase_ = self.get_successors(_snake_case)
for node in successors:
self.node_queue.append(_snake_case)
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase ( self : Tuple , _snake_case : Node):
"""simple docstring"""
UpperCAmelCase_ = []
for action in delta:
UpperCAmelCase_ = parent.pos_x + action[1]
UpperCAmelCase_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(_snake_case) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_snake_case , _snake_case , self.target.pos_y , self.target.pos_x , _snake_case))
return successors
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Node | None):
"""simple docstring"""
UpperCAmelCase_ = node
UpperCAmelCase_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
UpperCAmelCase_ = current_node.parent
path.reverse()
return path
class __snake_case :
def __init__( self : Dict , _snake_case : Optional[int] , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = BreadthFirstSearch(_snake_case , _snake_case)
UpperCAmelCase_ = BreadthFirstSearch(_snake_case , _snake_case)
UpperCAmelCase_ = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCAmelCase_ = self.fwd_bfs.node_queue.pop(0)
UpperCAmelCase_ = self.bwd_bfs.node_queue.pop(0)
if current_bwd_node.pos == current_fwd_node.pos:
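                # The two frontiers meet: stitch the forward path to the
                # reversed backward path. Only the heads of the two queues
                # are compared, so the meeting node depends on expansion order.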
UpperCAmelCase_ = True
return self.retrace_bidirectional_path(
_snake_case , _snake_case)
UpperCAmelCase_ = current_bwd_node
UpperCAmelCase_ = current_fwd_node
UpperCAmelCase_ = {
self.fwd_bfs: self.fwd_bfs.get_successors(_snake_case),
self.bwd_bfs: self.bwd_bfs.get_successors(_snake_case),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_snake_case)
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def lowerCamelCase ( self : int , _snake_case : Node , _snake_case : Node):
"""simple docstring"""
UpperCAmelCase_ = self.fwd_bfs.retrace_path(_snake_case)
UpperCAmelCase_ = self.bwd_bfs.retrace_path(_snake_case)
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
snake_case_ : Dict = (0, 0)
snake_case_ : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case_ : Any = time.time()
snake_case_ : Dict = BreadthFirstSearch(init, goal)
snake_case_ : Tuple = bfs.search()
snake_case_ : Optional[int] = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
snake_case_ : Any = time.time()
snake_case_ : List[Any] = BidirectionalBreadthFirstSearch(init, goal)
snake_case_ : Optional[int] = bd_bfs.search()
snake_case_ : Union[str, Any] = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 7 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = question_encoder
UpperCAmelCase_ = generator
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
if os.path.isfile(_snake_case):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_snake_case , exist_ok=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''')
UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''')
self.question_encoder.save_pretrained(_snake_case)
self.generator.save_pretrained(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case)
if config is None:
UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=_snake_case , generator=_snake_case)
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]):
"""simple docstring"""
return self.current_tokenizer(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return self.generator.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any):
"""simple docstring"""
return self.generator.decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.generator
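    # The two setters above toggle which sub-tokenizer __call__ dispatches
    # to: the question encoder for queries, the generator for targets.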
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ):
"""simple docstring"""
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
UpperCAmelCase_ = labels['''input_ids''']
return model_inputs
| 7 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def A () -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=__A , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=__A , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=__A )
return parser.parse_args()
def A () -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = parse_args()
# Import training_script as a module.
UpperCAmelCase_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
UpperCAmelCase_ = script_fpath.stem
UpperCAmelCase_ = importlib.import_module(__A )
# Patch sys.argv
UpperCAmelCase_ = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
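# A hedged invocation example (script and arguments are illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased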
| 7 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
| 7 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
snake_case_ : int = logging.get_logger(__name__)
class __snake_case ( a ):
def __init__( self : Any , *_snake_case : Union[str, Any] , **_snake_case : List[str]):
"""simple docstring"""
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , _snake_case , )
super().__init__(*_snake_case , **_snake_case)
| 7 |
from maths.prime_factors import prime_factors
def A (number : int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
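    # Minimal sanity checks (a sketch, assuming `prime_factors` returns the
    # factorization with multiplicity): an odd factor count maps to -1, an even
    # count to 1.
    assert A(2 ) == -1 # one prime factor
    assert A(6 ) == 1 # two prime factors, 2 * 3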
| 7 | 1 |
def solution (min_total : int = 10**12 ) -> int:
    """simple docstring"""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"{solution() = }")
| 7 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def A (__A : str , __A : str = "cpu" , __A : Union[str, None] = None ) -> None:
"""simple docstring"""
UpperCAmelCase_ = torch.load(__A , map_location=__A )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__A , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
UpperCAmelCase_ = v.half()
if save_path is None: # overwrite src_path
UpperCAmelCase_ = src_path
torch.save(__A , __A )
if __name__ == "__main__":
fire.Fire(convert)
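# Example CLI usage (a sketch; `fire` maps the function's parameters to flags,
# and the script name is illustrative):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path converts the checkpoint in place.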
| 7 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf (model : BertModel , ckpt_dir : str , model_name : str ):
    """simple docstring"""
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name : str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"""bert/{name}"""
    def create_tf_var(tensor : np.ndarray , name : str , session : tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_tensor = session.run(tf_var )
            print(F"""Successfully created {tf_name}: {np.allclose(tf_tensor , torch_tensor )}""" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def main (raw_args=None ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''' , type=str , required=True , help='''model name e.g. bert-base-uncased''' )
    parser.add_argument(
        '''--cache_dir''' , type=str , default=None , required=False , help='''Directory containing pytorch model''' )
    parser.add_argument('''--pytorch_model_path''' , type=str , required=True , help='''/path/to/<pytorch-model-name>.bin''' )
    parser.add_argument('''--tf_cache_dir''' , type=str , required=True , help='''Directory in which to save tensorflow model''' )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 7 | 1 |
def A (txt : str ) -> list:
    """simple docstring"""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
__import__("doctest").testmod()
| 7 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]):
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(_snake_case) , expected_num_stages + 1)
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = model_class(_snake_case)
@jax.jit
def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]):
return model(pixel_values=_snake_case , **_snake_case)
with self.subTest('''JIT Enabled'''):
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case))
for jitted_output, output in zip(_snake_case , _snake_case):
self.assertEqual(jitted_output.shape , output.shape)
def prepare_img () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''')
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
| 7 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
snake_case_ : Dict = None
snake_case_ : List[str] = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
snake_case_ : List[str] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
snake_case_ : int = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
snake_case_ : List[Any] = "▁"
class __snake_case ( a ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Dict = BigBirdTokenizer
UpperCAmelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCAmelCase__ : List[int] = []
def __init__( self : List[Any] , _snake_case : List[Any]=None , _snake_case : str=None , _snake_case : List[str]="<unk>" , _snake_case : Tuple="<s>" , _snake_case : Union[str, Any]="</s>" , _snake_case : List[Any]="<pad>" , _snake_case : Optional[Any]="[SEP]" , _snake_case : Tuple="[MASK]" , _snake_case : Tuple="[CLS]" , **_snake_case : Optional[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else bos_token
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else eos_token
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else unk_token
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else pad_token
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else cls_token
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else sep_token
# Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase_ = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else mask_token
super().__init__(
_snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , pad_token=_snake_case , cls_token=_snake_case , mask_token=_snake_case , **_snake_case , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = False if not self.vocab_file else True
def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
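        # Resulting layout (illustrative): [CLS] A [SEP] for a single sequence
        # and [CLS] A [SEP] B [SEP] for a pair, matching the special-tokens mask
        # and token type ids produced by the two methods below.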
def lowerCamelCase ( self : int , _snake_case : List[int] , _snake_case : Optional[List[int]] = None , _snake_case : bool = False):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''')
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_snake_case)) + [1]
return [1] + ([0] * len(_snake_case)) + [1] + ([0] * len(_snake_case)) + [1]
def lowerCamelCase ( self : List[Any] , _snake_case : List[int] , _snake_case : Optional[List[int]] = None):
"""simple docstring"""
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowerCamelCase ( self : Any , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(_snake_case):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
UpperCAmelCase_ = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case):
copyfile(self.vocab_file , _snake_case)
return (out_vocab_file,)
| 7 |
import comet # From: unbabel-comet
import torch
import datasets
snake_case_ : Tuple = datasets.logging.get_logger(__name__)
snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Any):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence'''),
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
if self.config_name == "default":
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
else:
UpperCAmelCase_ = comet.load_from_checkpoint(comet.download_model(self.config_name))
def lowerCamelCase ( self : List[Any] , _snake_case : str , _snake_case : List[str] , _snake_case : Tuple , _snake_case : int=None , _snake_case : Optional[Any]=False):
"""simple docstring"""
if gpus is None:
UpperCAmelCase_ = 1 if torch.cuda.is_available() else 0
UpperCAmelCase_ = {'''src''': sources, '''mt''': predictions, '''ref''': references}
UpperCAmelCase_ = [dict(zip(_snake_case , _snake_case)) for t in zip(*data.values())]
UpperCAmelCase_ , UpperCAmelCase_ = self.scorer.predict(_snake_case , gpus=_snake_case , progress_bar=_snake_case)
return {"mean_score": mean_score, "scores": scores}
| 7 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
def __init__( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Optional[int]=13 , _snake_case : Dict=32 , _snake_case : List[Any]=2 , _snake_case : List[Any]=3 , _snake_case : Tuple=16 , _snake_case : Union[str, Any]=[1, 2, 1] , _snake_case : Optional[Any]=[2, 2, 4] , _snake_case : Any=2 , _snake_case : Optional[Any]=2.0 , _snake_case : List[Any]=True , _snake_case : Any=0.0 , _snake_case : str=0.0 , _snake_case : Optional[Any]=0.1 , _snake_case : int="gelu" , _snake_case : Any=False , _snake_case : str=True , _snake_case : Optional[int]=0.0_2 , _snake_case : str=1e-5 , _snake_case : Tuple=True , _snake_case : Tuple=None , _snake_case : Union[str, Any]=True , _snake_case : str=10 , _snake_case : Union[str, Any]=8 , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = depths
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = window_size
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = use_absolute_embeddings
UpperCAmelCase_ = patch_norm
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = is_training
UpperCAmelCase_ = scope
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = encoder_stride
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase ( self : int , _snake_case : int , _snake_case : Dict , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = SwinvaModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case)
UpperCAmelCase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
UpperCAmelCase_ = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def lowerCamelCase ( self : Any , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = SwinvaForMaskedImageModeling(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = SwinvaForMaskedImageModeling(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def lowerCamelCase ( self : Any , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = SwinvaForImageClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
UpperCAmelCase__ : List[Any] = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : str = False
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=SwinvaConfig , embed_dim=37)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''')
def lowerCamelCase ( self : int):
"""simple docstring"""
pass
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear))
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.attentions
UpperCAmelCase_ = len(self.model_tester.depths)
self.assertEqual(len(_snake_case) , _snake_case)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = config.window_size**2
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_snake_case) , _snake_case)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
UpperCAmelCase_ = len(_snake_case)
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
if hasattr(self.model_tester , '''num_hidden_states_types'''):
UpperCAmelCase_ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
UpperCAmelCase_ = 2
self.assertEqual(out_len + added_hidden_states , len(_snake_case))
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_snake_case) , _snake_case)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Optional[int] , _snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = model_class(_snake_case)
model.to(_snake_case)
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(_snake_case) , _snake_case)
# Swinv2 has a different seq_length
UpperCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
UpperCAmelCase_ = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case) , _snake_case)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = reshaped_hidden_states[0].shape
UpperCAmelCase_ = (
reshaped_hidden_states[0].view(_snake_case , _snake_case , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
UpperCAmelCase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwinvaModel.from_pretrained(_snake_case)
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(_snake_case)
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=_snake_case)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
_snake_case)
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''pt''').to(_snake_case)
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6]).to(_snake_case)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
| 7 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( a ):
UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,)
UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),)
def lowerCamelCase ( self : Dict , **_snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_snake_case)
return config
def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_snake_case , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Dict , _snake_case : int=None , **_snake_case : Optional[Any]):
"""simple docstring"""
if scheduler is None:
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
return sample
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3
def lowerCamelCase ( self : int):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
UpperCAmelCase_ = self.full_loop(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
assert not torch.isnan(_snake_case).any(), "Samples have nan numbers"
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lower_order_final=_snake_case)
self.check_over_configs(lower_order_final=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def lowerCamelCase ( self : int):
"""simple docstring"""
self.check_over_configs(variance_type=_snake_case)
self.check_over_configs(variance_type='''learned_range''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_snake_case , time_step=0)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
assert sample.dtype == torch.floataa
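# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the test suite): it mirrors the `full_loop`
# pattern exercised above -- set_timesteps, then repeatedly call
# scheduler.step(model_output, t, sample).prev_sample. The zeros tensor stands
# in for a real denoiser, and the shapes and 25-step count are illustrative
# assumptions rather than values taken from the tests.
#
#   import torch
#   from diffusers import DPMSolverSinglestepScheduler
#
#   scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(25)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.timesteps:
#       model_output = torch.zeros_like(sample)  # placeholder for model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample
# ---------------------------------------------------------------------------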
| 7 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Tuple = OpenAIGPTTokenizer
UpperCAmelCase__ : Any = OpenAIGPTTokenizerFast
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : Optional[Any] = False
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase_ = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''') as fp:
fp.write(json.dumps(_snake_case))
with open(self.merges_file , '''w''') as fp:
fp.write('''\n'''.join(_snake_case))
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any]):
"""simple docstring"""
return "lower newer", "lower newer"
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file)
UpperCAmelCase_ = '''lower'''
UpperCAmelCase_ = ['''low''', '''er</w>''']
UpperCAmelCase_ = tokenizer.tokenize(_snake_case)
self.assertListEqual(_snake_case , _snake_case)
UpperCAmelCase_ = tokens + ['''<unk>''']
UpperCAmelCase_ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case) , _snake_case)
def lowerCamelCase ( self : int , _snake_case : List[Any]=15):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case)
# Simple input
UpperCAmelCase_ = '''This is a simple input'''
UpperCAmelCase_ = ['''This is a simple input 1''', '''This is a simple input 2''']
UpperCAmelCase_ = ('''This is a simple input''', '''This is a pair''')
UpperCAmelCase_ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''')
# Simple input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''')
# Simple input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode , _snake_case , max_length=_snake_case , padding='''max_length''')
# Pair input
self.assertRaises(_snake_case , tokenizer_r.encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''')
# Pair input
self.assertRaises(
_snake_case , tokenizer_r.batch_encode_plus , _snake_case , max_length=_snake_case , padding='''max_length''' , )
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __snake_case ( a ):
pass
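# Hedged BPE walk-through (grounded in the toy vocab/merges from setUp above):
# with the merge rules "l o", "lo w" and "e r</w>", the word "lower" is split
# into characters, then merged l+o -> "lo", lo+w -> "low" and e+r</w> ->
# "er</w>", yielding the tokens ["low", "er</w>"] and, through the vocab
# mapping, the ids [14, 15] asserted in the full-tokenizer test above.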
| 7 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
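# Descriptive note: in the upstream file this final assignment is
# `sys.modules[__name__] = _LazyModule(...)`, so an import such as
# `from transformers.models.deit import DeiTModel` only loads the heavy
# torch-backed submodule on first attribute access; the TYPE_CHECKING branch
# above exists so static type checkers still see the real imports.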
| 7 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A (__A : str ) -> List[Any]:
"""simple docstring"""
if "img_encoder.pos_embed" in name:
UpperCAmelCase_ = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
UpperCAmelCase_ = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
UpperCAmelCase_ = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
UpperCAmelCase_ = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
UpperCAmelCase_ = name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
UpperCAmelCase_ = name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
UpperCAmelCase_ = name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
UpperCAmelCase_ = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
UpperCAmelCase_ = name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
UpperCAmelCase_ = name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
UpperCAmelCase_ = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
UpperCAmelCase_ = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
UpperCAmelCase_ = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
UpperCAmelCase_ = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
UpperCAmelCase_ = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
UpperCAmelCase_ = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
UpperCAmelCase_ = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
UpperCAmelCase_ = name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
UpperCAmelCase_ = name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
UpperCAmelCase_ = name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
UpperCAmelCase_ = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
UpperCAmelCase_ = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
UpperCAmelCase_ = name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
UpperCAmelCase_ = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
def A (__A : Any , __A : List[str] ) -> Any:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(__A )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCAmelCase_ = key.split('''.''' )
UpperCAmelCase_ , UpperCAmelCase_ = int(key_split[2] ), int(key_split[4] )
UpperCAmelCase_ = config.vision_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
UpperCAmelCase_ = key.split('''.''' )
UpperCAmelCase_ = int(key_split[3] )
UpperCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[
dim : dim * 2, :
]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = rename_key(__A )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
UpperCAmelCase_ = val.squeeze_()
else:
UpperCAmelCase_ = val
return orig_state_dict
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def A (__A : Union[str, Any] , __A : Optional[Any] , __A : List[str]="groupvit-gcc-yfcc" , __A : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = GroupViTConfig()
UpperCAmelCase_ = GroupViTModel(__A ).eval()
UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' )['''model''']
UpperCAmelCase_ = convert_state_dict(__A , __A )
UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(__A , strict=__A )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__A ) == 0)
# verify result
UpperCAmelCase_ = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=__A , padding=__A , return_tensors='''pt''' )
with torch.no_grad():
UpperCAmelCase_ = model(**__A )
if model_name == "groupvit-gcc-yfcc":
UpperCAmelCase_ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
UpperCAmelCase_ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , __A , atol=1E-3 )
processor.save_pretrained(__A )
model.save_pretrained(__A )
print('''Successfully saved processor and model to''' , __A )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(__A , organization='''nielsr''' )
model.push_to_hub(__A , organization='''nielsr''' )
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
snake_case_ : Optional[int] = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
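# Hedged example invocation (the script filename and paths are placeholders,
# not taken from the repository):
#   python convert_groupvit_checkpoint.py \
#       --checkpoint_path ./checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
#       --model_name groupvit-gcc-yfcc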
| 7 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[Any] , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , ):
"""simple docstring"""
UpperCAmelCase_ = len(references[0])
if any(len(_snake_case) != references_per_prediction for refs in references):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
UpperCAmelCase_ = [[refs[i] for refs in references] for i in range(_snake_case)]
UpperCAmelCase_ = TER(
normalized=_snake_case , no_punct=_snake_case , asian_support=_snake_case , case_sensitive=_snake_case , )
UpperCAmelCase_ = sb_ter.corpus_score(_snake_case , _snake_case)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 7 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
snake_case_ : str = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def A (__A : List[Any] , __A : tuple , __A : Path , __A : Optional[Any] , __A : str , __A : List[str] , __A : Tuple , __A : Tuple=False , ) -> Optional[int]:
"""simple docstring"""
output_path.parent.mkdir(parents=__A , exist_ok=__A )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
__A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , use_external_data_format=__A , enable_onnx_checker=__A , opset_version=__A , )
else:
export(
__A , __A , f=output_path.as_posix() , input_names=__A , output_names=__A , dynamic_axes=__A , do_constant_folding=__A , opset_version=__A , )
@torch.no_grad()
def A (__A : str , __A : str , __A : int , __A : bool = False ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCAmelCase_ = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
UpperCAmelCase_ = '''cpu'''
UpperCAmelCase_ = Path(__A )
# VAE DECODER
UpperCAmelCase_ = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
UpperCAmelCase_ = vae_decoder.config.latent_channels
# forward only through the decoder part
UpperCAmelCase_ = vae_decoder.decode
onnx_export(
__A , model_args=(
torch.randn(1 , __A , 25 , 25 ).to(device=__A , dtype=__A ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=__A , )
del vae_decoder
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
snake_case_ : Any = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 7 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_tool('''text-to-speech''')
self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
| 7 | 1 |
from math import pow, sqrt
def A (*__A : float ) -> bool:
"""simple docstring"""
UpperCAmelCase_ = len(__A ) > 0 and all(value > 0.0 for value in values )
return result
def A (__A : float , __A : float ) -> float | ValueError:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A )
else ValueError('''Input Error: Molar mass values must be greater than 0.''' )
)
def A (__A : float , __A : float , __A : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A , __A )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def A (__A : float , __A : float , __A : float ) -> float | ValueError:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(__A , __A , __A )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def A (__A : float , __A : float , __A : float ) -> float | ValueError:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(__A , __A , __A )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
def A (__A : float , __A : float , __A : float ) -> float | ValueError:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(__A , __A , __A )
else ValueError(
'''Input Error: Molar mass and effusion rate values must be greater than 0.''' )
)
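# Worked example of the relation implemented above (Graham's law of effusion,
# rate_1 / rate_2 = sqrt(M_2 / M_1)); the molar masses below are standard
# textbook values, not taken from this file. For hydrogen (~2.016 g/mol)
# versus oxygen (~32.00 g/mol) the ratio is sqrt(32.00 / 2.016) ≈ 3.98, i.e.
# H2 effuses roughly four times faster than O2 at the same temperature.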
| 7 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def A (__A : List[str] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = 384
UpperCAmelCase_ = 7
if "tiny" in model_name:
UpperCAmelCase_ = 96
UpperCAmelCase_ = (2, 2, 6, 2)
UpperCAmelCase_ = (3, 6, 12, 24)
elif "small" in model_name:
UpperCAmelCase_ = 96
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (3, 6, 12, 24)
elif "base" in model_name:
UpperCAmelCase_ = 128
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (4, 8, 16, 32)
UpperCAmelCase_ = 12
UpperCAmelCase_ = 512
elif "large" in model_name:
UpperCAmelCase_ = 192
UpperCAmelCase_ = (2, 2, 18, 2)
UpperCAmelCase_ = (6, 12, 24, 48)
UpperCAmelCase_ = 12
UpperCAmelCase_ = 768
# set label information
UpperCAmelCase_ = 150
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = '''ade20k-id2label.json'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__A ): v for k, v in idalabel.items()}
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = SwinConfig(
embed_dim=__A , depths=__A , num_heads=__A , window_size=__A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
UpperCAmelCase_ = UperNetConfig(
backbone_config=__A , auxiliary_in_channels=__A , num_labels=__A , idalabel=__A , labelaid=__A , )
return config
def A (__A : List[str] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = []
# fmt: off
# stem
rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def A (__A : Optional[int] , __A : Any , __A : Tuple ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = dct.pop(__A )
UpperCAmelCase_ = val
def A (__A : str , __A : Tuple ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCAmelCase_ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
UpperCAmelCase_ = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:dim, :]
UpperCAmelCase_ = in_proj_bias[: dim]
UpperCAmelCase_ = in_proj_weight[
dim : dim * 2, :
]
UpperCAmelCase_ = in_proj_bias[
dim : dim * 2
]
UpperCAmelCase_ = in_proj_weight[
-dim :, :
]
UpperCAmelCase_ = in_proj_bias[-dim :]
# fmt: on
def A (__A : int ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = x.shape
UpperCAmelCase_ = x.reshape(__A , 4 , in_channel // 4 )
UpperCAmelCase_ = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(__A , __A )
return x
def A (__A : str ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = x.shape
UpperCAmelCase_ = x.reshape(__A , in_channel // 4 , 4 )
UpperCAmelCase_ = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(__A , __A )
return x
def A (__A : Union[str, Any] ) -> int:
"""simple docstring"""
UpperCAmelCase_ = x.shape[0]
UpperCAmelCase_ = x.reshape(4 , in_channel // 4 )
UpperCAmelCase_ = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(__A )
return x
def A (__A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = x.shape[0]
UpperCAmelCase_ = x.reshape(in_channel // 4 , 4 )
UpperCAmelCase_ = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(__A )
return x
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = {
'''upernet-swin-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth''',
'''upernet-swin-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth''',
'''upernet-swin-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth''',
'''upernet-swin-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth''',
}
UpperCAmelCase_ = model_name_to_url[model_name]
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(__A , map_location='''cpu''' , file_name=__A )[
'''state_dict'''
]
for name, param in state_dict.items():
print(__A , param.shape )
UpperCAmelCase_ = get_upernet_config(__A )
UpperCAmelCase_ = UperNetForSemanticSegmentation(__A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase_ = state_dict.pop(__A )
if "bn" in key:
UpperCAmelCase_ = key.replace('''bn''' , '''batch_norm''' )
UpperCAmelCase_ = val
# rename keys
UpperCAmelCase_ = create_rename_keys(__A )
for src, dest in rename_keys:
rename_key(__A , __A , __A )
read_in_q_k_v(__A , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
UpperCAmelCase_ = reverse_correct_unfold_reduction_order(__A )
if "norm" in key:
UpperCAmelCase_ = reverse_correct_unfold_norm_order(__A )
model.load_state_dict(__A )
# verify on image
UpperCAmelCase_ = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
UpperCAmelCase_ = Image.open(requests.get(__A , stream=__A ).raw ).convert('''RGB''' )
UpperCAmelCase_ = SegformerImageProcessor()
UpperCAmelCase_ = processor(__A , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
UpperCAmelCase_ = model(__A )
UpperCAmelCase_ = outputs.logits
print(logits.shape )
print('''First values of logits:''' , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
UpperCAmelCase_ = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
UpperCAmelCase_ = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
UpperCAmelCase_ = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
UpperCAmelCase_ = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__A )
if push_to_hub:
print(F"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(F"""openmmlab/{model_name}""" )
processor.push_to_hub(F"""openmmlab/{model_name}""" )
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
snake_case_ : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
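# Hedged example invocation (the script filename is a placeholder):
#   python convert_upernet_checkpoint.py --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny
# The matching mmsegmentation weights are downloaded automatically from the
# URL map defined inside the conversion function above.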
| 7 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
| 7 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = question_encoder
UpperCAmelCase_ = generator
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
if os.path.isfile(_snake_case):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_snake_case , exist_ok=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''')
UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''')
self.question_encoder.save_pretrained(_snake_case)
self.generator.save_pretrained(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case)
if config is None:
UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=_snake_case , generator=_snake_case)
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]):
"""simple docstring"""
return self.current_tokenizer(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return self.generator.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any):
"""simple docstring"""
return self.generator.decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.generator
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ):
"""simple docstring"""
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
UpperCAmelCase_ = labels['''input_ids''']
return model_inputs
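# Hedged usage sketch (upstream this class is RagTokenizer; the checkpoint id
# is illustrative):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who wrote hamlet?", return_tensors="pt")
# __call__ forwards to whichever sub-tokenizer is current -- the question
# encoder by default -- as toggled by the two switch helpers above.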
| 7 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=__A )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
UpperCAmelCase_ = timeit(
'''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=__A , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
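# Worked example of Brian Kernighan's trick benchmarked above: each
# `number &= number - 1` clears exactly the lowest set bit, so the loop body
# runs once per set bit. For number = 25 (0b11001):
#   25 & 24 = 0b11000, 24 & 23 = 0b10000, 16 & 15 = 0  ->  3 set bits.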
| 7 | 1 |
import math
def A (__A : list , __A : int = 0 , __A : int = 0 ) -> list:
"""simple docstring"""
UpperCAmelCase_ = end or len(__A )
for i in range(__A , __A ):
UpperCAmelCase_ = i
UpperCAmelCase_ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCAmelCase_ = array[temp_index - 1]
temp_index -= 1
UpperCAmelCase_ = temp_index_value
return array
def A (__A : list , __A : int , __A : int ) -> None: # Max Heap
"""simple docstring"""
UpperCAmelCase_ = index
UpperCAmelCase_ = 2 * index + 1 # Left Node
UpperCAmelCase_ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCAmelCase_ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCAmelCase_ = right_index
if largest != index:
UpperCAmelCase_ , UpperCAmelCase_ = array[largest], array[index]
heapify(__A , __A , __A )
def A (__A : list ) -> list:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
for i in range(n // 2 , -1 , -1 ):
heapify(__A , __A , __A )
for i in range(n - 1 , 0 , -1 ):
UpperCAmelCase_ , UpperCAmelCase_ = array[0], array[i]
heapify(__A , 0 , __A )
return array
def A (__A : list , __A : int , __A : int , __A : int ) -> int:
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def A (__A : list , __A : int , __A : int , __A : int ) -> int:
"""simple docstring"""
UpperCAmelCase_ = low
UpperCAmelCase_ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
UpperCAmelCase_ , UpperCAmelCase_ = array[j], array[i]
i += 1
def A (__A : list ) -> list:
"""simple docstring"""
if len(__A ) == 0:
return array
UpperCAmelCase_ = 2 * math.ceil(math.loga(len(__A ) ) )
UpperCAmelCase_ = 16
return intro_sort(__A , 0 , len(__A ) , __A , __A )
def A (__A : list , __A : int , __A : int , __A : int , __A : int ) -> list:
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(__A )
max_depth -= 1
UpperCAmelCase_ = median_of_a(__A , __A , start + ((end - start) // 2) + 1 , end - 1 )
UpperCAmelCase_ = partition(__A , __A , __A , __A )
intro_sort(__A , __A , __A , __A , __A )
UpperCAmelCase_ = p
return insertion_sort(__A , __A , __A )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = input("Enter numbers separated by a comma : ").strip()
snake_case_ : Any = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
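# Hedged non-interactive example (bypasses the input() prompt above):
#   sort([4.0, 2.0, 6.0, 8.0, 1.0])  # -> [1.0, 2.0, 4.0, 6.0, 8.0]
# The introsort above starts with quicksort using a median-of-three pivot,
# switches to heapsort once the depth budget of 2*ceil(log2(n)) is exhausted,
# and finishes partitions of <= 16 elements with insertion sort.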
| 7 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
| 7 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class __snake_case ( a , a ):
UpperCAmelCase__ : Any = '''resnet'''
UpperCAmelCase__ : List[Any] = ['''basic''', '''bottleneck''']
def __init__( self : Optional[Any] , _snake_case : Tuple=3 , _snake_case : Tuple=64 , _snake_case : Optional[Any]=[256, 512, 1024, 2048] , _snake_case : List[Any]=[3, 4, 6, 3] , _snake_case : List[Any]="bottleneck" , _snake_case : int="relu" , _snake_case : int=False , _snake_case : str=None , _snake_case : Any=None , **_snake_case : int , ):
"""simple docstring"""
super().__init__(**_snake_case)
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {",".join(self.layer_types)}""")
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = layer_type
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = downsample_in_first_stage
UpperCAmelCase_ = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(_snake_case) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_snake_case , out_indices=_snake_case , stage_names=self.stage_names)
class __snake_case ( a ):
UpperCAmelCase__ : str = version.parse('''1.11''' )
@property
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def lowerCamelCase ( self : int):
"""simple docstring"""
return 1e-3
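# Construction sketch (illustrative values; assuming the un-mangled ResNetConfig name):
#
#   config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck", out_features=["stage4"])
#   config.stage_names  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']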
| 7 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertEqual(_snake_case , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
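        # shift_tokens_right builds decoder_input_ids from the labels: the decoder start
        # token (eos here) is placed at position 0 and the labels shift one step right,
        # so the decoder input starts with [eos, lang_code, ...] (asserted below).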
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 7 | 1 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
snake_case_ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
a , r'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' , )
class __snake_case ( a ):
def lowerCamelCase ( self : Any , _snake_case : GenericTensor):
"""simple docstring"""
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case)
else:
raise ValueError('''Unsupported framework''')
return masked_index
def lowerCamelCase ( self : Optional[Any] , _snake_case : GenericTensor):
"""simple docstring"""
UpperCAmelCase_ = self.get_masked_index(_snake_case)
UpperCAmelCase_ = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def lowerCamelCase ( self : List[Any] , _snake_case : GenericTensor):
"""simple docstring"""
if isinstance(_snake_case , _snake_case):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : str , _snake_case : Union[str, Any]=None , **_snake_case : Dict):
"""simple docstring"""
if return_tensors is None:
UpperCAmelCase_ = self.framework
UpperCAmelCase_ = self.tokenizer(_snake_case , return_tensors=_snake_case)
self.ensure_exactly_one_mask_token(_snake_case)
return model_inputs
def lowerCamelCase ( self : Tuple , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
UpperCAmelCase_ = model_inputs['''input_ids''']
return model_outputs
def lowerCamelCase ( self : Any , _snake_case : int , _snake_case : Union[str, Any]=5 , _snake_case : Optional[int]=None):
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase_ = target_ids.shape[0]
UpperCAmelCase_ = model_outputs['''input_ids'''][0]
UpperCAmelCase_ = model_outputs['''logits''']
if self.framework == "tf":
UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
UpperCAmelCase_ = outputs.numpy()
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = stable_softmax(_snake_case , axis=-1)
if target_ids is not None:
UpperCAmelCase_ = tf.gather_nd(tf.squeeze(_snake_case , 0) , target_ids.reshape(-1 , 1))
UpperCAmelCase_ = tf.expand_dims(_snake_case , 0)
UpperCAmelCase_ = tf.math.top_k(_snake_case , k=_snake_case)
UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_snake_case).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase_ = outputs[0, masked_index, :]
UpperCAmelCase_ = logits.softmax(dim=-1)
if target_ids is not None:
UpperCAmelCase_ = probs[..., target_ids]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
UpperCAmelCase_ = []
UpperCAmelCase_ = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
UpperCAmelCase_ = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
UpperCAmelCase_ = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase_ = target_ids[p].tolist()
UpperCAmelCase_ = p
# Filter padding out:
UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi-mask inputs, though, the other [MASK] tokens would be
                # removed as well, making the output look odd, so we add them back.
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
row.append(_snake_case)
result.append(_snake_case)
if single_mask:
return result[0]
return result
def lowerCamelCase ( self : Union[str, Any] , _snake_case : List[Any] , _snake_case : Optional[Any]=None):
"""simple docstring"""
if isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = [targets]
try:
UpperCAmelCase_ = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase_ = {}
UpperCAmelCase_ = []
for target in targets:
UpperCAmelCase_ = vocab.get(_snake_case , _snake_case)
if id_ is None:
UpperCAmelCase_ = self.tokenizer(
_snake_case , add_special_tokens=_snake_case , return_attention_mask=_snake_case , return_token_type_ids=_snake_case , max_length=1 , truncation=_snake_case , )['''input_ids''']
if len(_snake_case) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''')
continue
UpperCAmelCase_ = input_ids[0]
                # XXX: if users hit this code path, tokenization becomes pretty
                # slow, so this warning lets them fix their input to get faster
                # performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.""")
target_ids.append(id_)
UpperCAmelCase_ = list(set(_snake_case))
if len(_snake_case) == 0:
raise ValueError('''At least one target must be provided when passed.''')
UpperCAmelCase_ = np.array(_snake_case)
return target_ids
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]=None , _snake_case : List[Any]=None):
"""simple docstring"""
UpperCAmelCase_ = {}
if targets is not None:
UpperCAmelCase_ = self.get_target_ids(_snake_case , _snake_case)
UpperCAmelCase_ = target_ids
if top_k is not None:
UpperCAmelCase_ = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''')
return {}, {}, postprocess_params
def __call__( self : str , _snake_case : Dict , *_snake_case : Tuple , **_snake_case : str):
"""simple docstring"""
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
if isinstance(_snake_case , _snake_case) and len(_snake_case) == 1:
return outputs[0]
return outputs
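# A minimal usage sketch (hypothetical checkpoint name; the standard `pipeline`
# factory from transformers is assumed):
#
#   from transformers import pipeline
#   fill_mask = pipeline("fill-mask", model="bert-base-uncased")
#   fill_mask("Paris is the [MASK] of France.", top_k=3)
#   # -> a list of dicts with "score", "token", "token_str" and "sequence" keys,
#   #    matching the postprocess() output above.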
| 7 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : str):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_snake_case , )
assert hasattr(self , '''env''')
def lowerCamelCase ( self : List[Any] , _snake_case : Union[str, Any]=1):
"""simple docstring"""
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_snake_case , instance_type=self.instance_type , debugger_hook_config=_snake_case , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def lowerCamelCase ( self : int , _snake_case : Dict):
"""simple docstring"""
TrainingJobAnalytics(_snake_case).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase_ = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
UpperCAmelCase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''])
UpperCAmelCase_ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''])
        # get train time from the SageMaker job; this includes starting, preprocessing and stopping
UpperCAmelCase_ = (
Session().describe_training_job(estimator.latest_training_job.name).get('''TrainingTimeInSeconds''' , 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy)
assert all(t <= self.results['''eval_loss'''] for t in eval_loss)
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''') as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _snake_case)
| 7 |
import sys
def A (__A : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
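    # Dynamic programming over increasing chain lengths: matrix[a][b] holds the
    # minimum number of scalar multiplications needed to compute A_a..A_b, and
    # sol[a][b] records the split point c that achieves it.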
for chain_length in range(2 , __A ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__A , __A ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if i == j:
print('''A''' + str(__A ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
        print_optimal_solution(__A , __A , optimal_solution[i][j] )
        print_optimal_solution(__A , optimal_solution[i][j] + 1 , __A )
print(''')''' , end=''' ''' )
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )
    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(__A , 1 , n - 1 )
if __name__ == "__main__":
main()
| 7 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Optional[int] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Union[str, Any] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
snake_case_ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
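# With this lazy-module pattern, `from transformers import FNetModel` only triggers
# the heavy torch import on first attribute access (illustrative note).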
| 7 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
    # used during training (despite the fact that we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
    # `ignore_value` used during training (despite the fact that we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
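            # (i.e. a `getattr(config, "xxx", ...)` call split across several lines)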
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
| 7 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : List[Any] = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class __snake_case ( a ):
UpperCAmelCase__ : Optional[Any] = '''t5'''
UpperCAmelCase__ : Optional[int] = ['''past_key_values''']
UpperCAmelCase__ : List[str] = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self : Tuple , _snake_case : Optional[Any]=32128 , _snake_case : int=512 , _snake_case : Union[str, Any]=64 , _snake_case : List[str]=2048 , _snake_case : Tuple=6 , _snake_case : List[str]=None , _snake_case : List[Any]=8 , _snake_case : List[Any]=32 , _snake_case : Dict=128 , _snake_case : Tuple=0.1 , _snake_case : str=1e-6 , _snake_case : List[str]=1.0 , _snake_case : List[Any]="relu" , _snake_case : str=True , _snake_case : Optional[Any]=True , _snake_case : str=0 , _snake_case : int=1 , **_snake_case : int , ):
"""simple docstring"""
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = d_model
UpperCAmelCase_ = d_kv
UpperCAmelCase_ = d_ff
UpperCAmelCase_ = num_layers
UpperCAmelCase_ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = relative_attention_num_buckets
UpperCAmelCase_ = relative_attention_max_distance
UpperCAmelCase_ = dropout_rate
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_factor
UpperCAmelCase_ = feed_forward_proj
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = self.feed_forward_proj.split('''-''')
UpperCAmelCase_ = act_info[-1]
UpperCAmelCase_ = act_info[0] == '''gated'''
if len(_snake_case) > 1 and act_info[0] != "gated" or len(_snake_case) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCAmelCase_ = '''gelu_new'''
super().__init__(
pad_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , **_snake_case , )
class __snake_case ( a ):
@property
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
UpperCAmelCase_ = '''past_encoder_sequence + sequence'''
UpperCAmelCase_ = {0: '''batch'''}
UpperCAmelCase_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCAmelCase_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_snake_case , direction='''inputs''')
return common_inputs
@property
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
return 13
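# Sketch of the `feed_forward_proj` parsing in the config above (illustrative):
#   "relu"       -> is_gated_act=False, dense_act_fn="relu"
#   "gated-gelu" -> is_gated_act=True,  dense_act_fn="gelu_new"
#                   (rewritten by the backwards-compatibility branch)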
| 7 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Optional[Any] = FlaxAutoencoderKL
@property
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = 4
UpperCAmelCase_ = 3
UpperCAmelCase_ = (32, 32)
UpperCAmelCase_ = jax.random.PRNGKey(0)
UpperCAmelCase_ = jax.random.uniform(_snake_case , ((batch_size, num_channels) + sizes))
return {"sample": image, "prng_key": prng_key}
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
UpperCAmelCase_ = self.dummy_input
return init_dict, inputs_dict
| 7 | 1 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A (__A : BertModel , __A : str , __A : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
UpperCAmelCase_ = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__A ):
os.makedirs(__A )
UpperCAmelCase_ = model.state_dict()
def to_tf_var_name(__A : str ):
for patt, repl in iter(__A ):
UpperCAmelCase_ = name.replace(__A , __A )
return F"""bert/{name}"""
def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ):
UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__A )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ = to_tf_var_name(__A )
UpperCAmelCase_ = state_dict[var_name].numpy()
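            # PyTorch stores Linear weights as (out_features, in_features) while TF
            # kernels are (in_features, out_features), hence the transpose below.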
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ = torch_tensor.T
UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A )
tf.keras.backend.set_value(__A , __A )
UpperCAmelCase_ = session.run(__A )
print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" )
UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() )
saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def A (__A : Any=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase_ = parser.parse_args(__A )
UpperCAmelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 7 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''test-config''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
UpperCAmelCase_ = CustomConfig(attribute=42)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case)
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCAmelCase_ = c.n_embd + 1 # int
UpperCAmelCase_ = c.resid_pdrop + 1.0 # float
UpperCAmelCase_ = not c.scale_attn_weights # bool
UpperCAmelCase_ = c.summary_type + '''foo''' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''')
self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = PretrainedConfig()
UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)]
if len(_snake_case) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F""" {", ".join(_snake_case)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
with self.assertRaises(_snake_case):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = mock.Mock()
UpperCAmelCase_ = 500
UpperCAmelCase_ = {}
UpperCAmelCase_ = HTTPError
UpperCAmelCase_ = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head:
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''')
UpperCAmelCase_ = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_snake_case)
UpperCAmelCase_ = 2
json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w'''))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCAmelCase_ = ['''config.42.0.0.json''']
UpperCAmelCase_ = 768
configuration.save_pretrained(_snake_case)
shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json'''))
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 768)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
UpperCAmelCase_ = '''v4.0.0'''
UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained(
_snake_case , return_unused_kwargs=_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_snake_case , {})
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
UpperCAmelCase_ = '''v3.0.0'''
UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case)
self.assertEqual(old_configuration.hidden_size , 768)
| 7 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def A (__A : bool = True , *__A : Optional[Any] , **__A : Any ) -> Union[str, Any]:
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
UpperCAmelCase_ = False
if main_process_only:
        UpperCAmelCase_ = PartialState().local_process_index != 0
return _tqdm(*__A , **__A , disable=__A )
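# Usage sketch matching the signature above, where main_process_only is the first
# positional argument (`dataloader` is a placeholder):
#
#   for batch in tqdm(True, dataloader):  # bar rendered only on local rank 0
#       ...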
| 7 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
snake_case_ : List[Any] = (3, 9, -11, 0, 7, 5, 1, -1)
snake_case_ : str = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __snake_case :
UpperCAmelCase__ : int
UpperCAmelCase__ : Node | None
class __snake_case :
def __init__( self : Optional[int] , _snake_case : Iterable[int]):
"""simple docstring"""
UpperCAmelCase_ = None
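        # Build the list by prepending nodes; iterating the input in descending
        # order leaves the linked list sorted ascending.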
for i in sorted(_snake_case , reverse=_snake_case):
UpperCAmelCase_ = Node(_snake_case , self.head)
def __iter__( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.head
while node:
yield node.data
UpperCAmelCase_ = node.next_node
def __len__( self : int):
"""simple docstring"""
return sum(1 for _ in self)
def __str__( self : Optional[Any]):
"""simple docstring"""
return " -> ".join([str(_snake_case) for node in self])
def A (__A : SortedLinkedList , __A : SortedLinkedList ) -> SortedLinkedList:
"""simple docstring"""
return SortedLinkedList(list(__A ) + list(__A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Union[str, Any] = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 7 | 1 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
snake_case_ : List[Any] = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
snake_case_ : Dict = logging.WARNING
def A () -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ = os.getenv('''DATASETS_VERBOSITY''' , __A )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
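# Example (assumption: the variable is exported before `datasets` is first
# imported, since the root logger is configured once at import time below):
#   DATASETS_VERBOSITY=debug python train.py
# Valid values are the keys of the level dict above; anything else triggers the
# warning and falls back to the default level (logging.WARNING).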
def A () -> str:
"""simple docstring"""
return __name__.split('''.''' )[0]
def A () -> logging.Logger:
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def A () -> None:
"""simple docstring"""
UpperCAmelCase_ = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def A () -> None:
"""simple docstring"""
UpperCAmelCase_ = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def A (__A : Optional[str] = None ) -> logging.Logger:
"""simple docstring"""
if name is None:
UpperCAmelCase_ = _get_library_name()
return logging.getLogger(__A )
def A () -> int:
"""simple docstring"""
return _get_library_root_logger().getEffectiveLevel()
def A (__A : int ) -> None:
"""simple docstring"""
_get_library_root_logger().setLevel(__A )
def A () -> Dict:
"""simple docstring"""
return set_verbosity(__A )
def A () -> Optional[int]:
"""simple docstring"""
return set_verbosity(__A )
def A () -> Optional[Any]:
"""simple docstring"""
return set_verbosity(__A )
def A () -> Optional[int]:
"""simple docstring"""
return set_verbosity(__A )
def A () -> None:
"""simple docstring"""
UpperCAmelCase_ = False
def A () -> None:
"""simple docstring"""
UpperCAmelCase_ = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class __snake_case :
def __init__( self : str , *_snake_case : int , **_snake_case : List[str]): # pylint: disable=unused-argument
"""simple docstring"""
UpperCAmelCase_ = args[0] if args else None
def __iter__( self : Optional[Any]):
"""simple docstring"""
return iter(self._iterator)
def __getattr__( self : Any , _snake_case : List[Any]):
"""simple docstring"""
def empty_fn(*_snake_case : Any , **_snake_case : Tuple): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Dict):
"""simple docstring"""
return self
def __exit__( self : List[Any] , _snake_case : Dict , _snake_case : int , _snake_case : List[Any]):
"""simple docstring"""
return
snake_case_ : Optional[int] = True
class __snake_case :
def __call__( self : int , *_snake_case : str , _snake_case : Tuple=False , **_snake_case : Union[str, Any]):
"""simple docstring"""
if _tqdm_active and not disable:
return tqdm_lib.tqdm(*_snake_case , **_snake_case)
else:
return EmptyTqdm(*_snake_case , **_snake_case)
def lowerCamelCase ( self : Tuple , *_snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
snake_case_ : Tuple = _tqdm_cls()
def A () -> bool:
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def A () -> int:
"""simple docstring"""
global _tqdm_active
UpperCAmelCase_ = True
def A () -> Optional[int]:
"""simple docstring"""
global _tqdm_active
UpperCAmelCase_ = False
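# Hedged usage sketch (the toggles above are all renamed to `A` here; upstream
# datasets exposes them roughly as enable_progress_bar()/disable_progress_bar(),
# which flip the module-global flag read by the tqdm wrapper class):
#   import datasets
#   datasets.disable_progress_bar()   # subsequent bars become EmptyTqdm no-ops
#   datasets.enable_progress_bar()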
| 7 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
class __snake_case :
def __init__( self : int , _snake_case : List[Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = question_encoder
UpperCAmelCase_ = generator
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Optional[int]):
"""simple docstring"""
if os.path.isfile(_snake_case):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""")
os.makedirs(_snake_case , exist_ok=_snake_case)
UpperCAmelCase_ = os.path.join(_snake_case , '''question_encoder_tokenizer''')
UpperCAmelCase_ = os.path.join(_snake_case , '''generator_tokenizer''')
self.question_encoder.save_pretrained(_snake_case)
self.generator.save_pretrained(_snake_case)
@classmethod
def lowerCamelCase ( cls : Optional[Any] , _snake_case : Optional[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase_ = kwargs.pop('''config''' , _snake_case)
if config is None:
UpperCAmelCase_ = RagConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=_snake_case , generator=_snake_case)
def __call__( self : List[Any] , *_snake_case : List[str] , **_snake_case : List[Any]):
"""simple docstring"""
return self.current_tokenizer(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[Any] , *_snake_case : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return self.generator.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : Optional[int] , **_snake_case : Any):
"""simple docstring"""
return self.generator.decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.question_encoder
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.generator
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[str] , _snake_case : Optional[List[str]] = None , _snake_case : Optional[int] = None , _snake_case : Optional[int] = None , _snake_case : str = "longest" , _snake_case : str = None , _snake_case : bool = True , **_snake_case : Optional[int] , ):
"""simple docstring"""
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase_ = self.current_tokenizer.model_max_length
UpperCAmelCase_ = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
UpperCAmelCase_ = labels['''input_ids''']
return model_inputs
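# Hedged usage sketch (the class above is a RAG-style dual tokenizer; the
# RagTokenizer name and the hub id are assumptions, not defined in this snippet):
#   tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   enc = tok("who holds the record in 100m freestyle?", return_tensors="pt")
#   tok.save_pretrained("./rag_tok")  # writes the question_encoder_tokenizer/
#                                     # and generator_tokenizer/ subfolders above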
| 7 | 1 |
def A (files : list ) -> float:
    """simple docstring"""
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
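# Worked example for the greedy two-way merge above:
#   A([2, 3, 4]) -> merge 2+3 (cost 5), then 5+4 (cost 9) -> returns 14
# Each pass pops the two cheapest "files", appends their sum back to the list,
# and adds that sum to the running total, i.e. Huffman-style optimal merging.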
| 7 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-base''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 768)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
@slow
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = XLMRobertaModel.from_pretrained('''xlm-roberta-large''')
UpperCAmelCase_ = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
# The dog is cute and lives in the garden house
UpperCAmelCase_ = torch.Size((1, 12, 1024)) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ = torch.tensor(
[[-0.0_6_9_9, -0.0_3_1_8, 0.0_7_0_5, -0.1_2_4_1, 0.0_9_9_9, -0.0_5_2_0, 0.1_0_0_4, -0.1_8_3_8, -0.4_7_0_4, 0.1_4_3_7, 0.0_8_2_1, 0.0_1_2_6]])
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _snake_case)
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _snake_case , atol=1e-3))
| 7 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
| 7 |
from maths.prime_factors import prime_factors
def A (number : int ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
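# Worked examples for the parity rule above (assuming the intent is the Mobius
# function and that `prime_factors` returns prime factors with multiplicity):
#   A(6)  -> factors [2, 3]    -> even count -> returns 1   (mu(6) = 1)
#   A(30) -> factors [2, 3, 5] -> odd count  -> returns -1  (mu(30) = -1)
# Note: squareful inputs (e.g. 4 = 2*2) would need an extra square-free check
# to yield mu = 0; that check is outside this snippet.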
if __name__ == "__main__":
import doctest
doctest.testmod()
| 7 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __snake_case ( a ):
UpperCAmelCase__ : Optional[Any] = ['''image_processor''', '''tokenizer''']
UpperCAmelCase__ : int = '''BridgeTowerImageProcessor'''
UpperCAmelCase__ : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Optional[Any] , _snake_case : Optional[Any] , _snake_case : str):
"""simple docstring"""
super().__init__(_snake_case , _snake_case)
def __call__( self : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _snake_case : bool = True , _snake_case : Union[bool, str, PaddingStrategy] = False , _snake_case : Union[bool, str, TruncationStrategy] = None , _snake_case : Optional[int] = None , _snake_case : int = 0 , _snake_case : Optional[int] = None , _snake_case : Optional[bool] = None , _snake_case : Optional[bool] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = False , _snake_case : bool = True , _snake_case : Optional[Union[str, TensorType]] = None , **_snake_case : str , ):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer(
text=_snake_case , add_special_tokens=_snake_case , padding=_snake_case , truncation=_snake_case , max_length=_snake_case , stride=_snake_case , pad_to_multiple_of=_snake_case , return_token_type_ids=_snake_case , return_attention_mask=_snake_case , return_overflowing_tokens=_snake_case , return_special_tokens_mask=_snake_case , return_offsets_mapping=_snake_case , return_length=_snake_case , verbose=_snake_case , return_tensors=_snake_case , **_snake_case , )
# add pixel_values + pixel_mask
UpperCAmelCase_ = self.image_processor(
_snake_case , return_tensors=_snake_case , do_normalize=_snake_case , do_center_crop=_snake_case , **_snake_case)
encoding.update(_snake_case)
return encoding
def lowerCamelCase ( self : List[str] , *_snake_case : List[Any] , **_snake_case : Optional[int]):
"""simple docstring"""
return self.tokenizer.batch_decode(*_snake_case , **_snake_case)
def lowerCamelCase ( self : str , *_snake_case : List[Any] , **_snake_case : str):
"""simple docstring"""
return self.tokenizer.decode(*_snake_case , **_snake_case)
@property
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.model_input_names
UpperCAmelCase_ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
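# Hedged usage sketch (the processor above pairs a BridgeTower image processor
# with a RoBERTa tokenizer; the BridgeTowerProcessor name and hub id are
# assumptions, not defined in this snippet):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   enc = processor(images=image, text="a photo of two cats", return_tensors="pt")
#   # enc holds input_ids/attention_mask plus pixel_values (and pixel_mask)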
| 7 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
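# Minimal standalone sketch distilled from the tests above (the tiny checkpoint
# id and the argument flags are taken directly from them):
#   args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"],
#                                    training=False, inference=True,
#                                    sequence_lengths=[8], batch_sizes=[1],
#                                    multi_process=False)
#   results = PyTorchBenchmark(args).run()
#   # results.time_inference_result holds per-model dicts indexed by
#   # batch size and sequence length, as checked in the helper above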
| 7 | 1 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def A (__A : BertModel , __A : str , __A : str ) -> int:
"""simple docstring"""
UpperCAmelCase_ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
UpperCAmelCase_ = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__A ):
os.makedirs(__A )
UpperCAmelCase_ = model.state_dict()
def to_tf_var_name(__A : str ):
for patt, repl in iter(__A ):
UpperCAmelCase_ = name.replace(__A , __A )
return F"""bert/{name}"""
def create_tf_var(__A : np.ndarray , __A : str , __A : tf.Session ):
UpperCAmelCase_ = tf.dtypes.as_dtype(tensor.dtype )
UpperCAmelCase_ = tf.get_variable(dtype=__A , shape=tensor.shape , name=__A , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__A )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCAmelCase_ = to_tf_var_name(__A )
UpperCAmelCase_ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCAmelCase_ = torch_tensor.T
UpperCAmelCase_ = create_tf_var(tensor=__A , name=__A , session=__A )
tf.keras.backend.set_value(__A , __A )
UpperCAmelCase_ = session.run(__A )
print(F"""Successfully created {tf_name}: {np.allclose(__A , __A )}""" )
UpperCAmelCase_ = tf.train.Saver(tf.trainable_variables() )
saver.save(__A , os.path.join(__A , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
def A (__A : Any=None ) -> str:
"""simple docstring"""
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__A , required=__A , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__A , default=__A , required=__A , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__A , required=__A , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__A , required=__A , help='''Directory in which to save tensorflow model''' )
UpperCAmelCase_ = parser.parse_args(__A )
UpperCAmelCase_ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__A , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
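# Example invocation (script filename and paths are placeholders):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /tmp/tf_ckpt
# The checkpoint is written as bert_base_uncased.ckpt inside --tf_cache_dir,
# per the saver.save(...) call above.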
| 7 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : int = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
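# The _LazyModule above defers the heavy torch-dependent imports until an
# attribute is first accessed, e.g. (import path assumed from the structure):
#   from transformers import InstructBlipProcessor   # resolves lazily on access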
| 7 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case ( unittest.TestCase ):
def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : Dict=3 , _snake_case : Dict=32 , _snake_case : List[str]=3 , _snake_case : Union[str, Any]=10 , _snake_case : Tuple=[10, 20, 30, 40] , _snake_case : Dict=[1, 1, 2, 1] , _snake_case : List[Any]=True , _snake_case : Dict=True , _snake_case : Union[str, Any]="relu" , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCAmelCase_ = self.get_config()
return config, pixel_values
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModel(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase ( self : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = FlaxRegNetForImageClassification(config=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
return
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case)
@unittest.skip(reason='''RegNet does not use inputs_embeds''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''')
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
pass
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
def check_hidden_states_output(_snake_case : List[str] , _snake_case : Dict , _snake_case : List[str]):
UpperCAmelCase_ = model_class(_snake_case)
UpperCAmelCase_ = model(**self._prepare_for_class(_snake_case , _snake_case))
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(_snake_case) , expected_num_stages + 1)
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
UpperCAmelCase_ = self._prepare_for_class(_snake_case , _snake_case)
UpperCAmelCase_ = model_class(_snake_case)
@jax.jit
def model_jitted(_snake_case : str , **_snake_case : Union[str, Any]):
return model(pixel_values=_snake_case , **_snake_case)
with self.subTest('''JIT Enabled'''):
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
with self.subTest('''JIT Disabled'''):
with jax.disable_jit():
UpperCAmelCase_ = model_jitted(**_snake_case).to_tuple()
self.assertEqual(len(_snake_case) , len(_snake_case))
for jitted_output, output in zip(_snake_case , _snake_case):
self.assertEqual(jitted_output.shape , output.shape)
def A () -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_flax
class __snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''') if is_vision_available() else None
@slow
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''')
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_snake_case , return_tensors='''np''')
UpperCAmelCase_ = model(**_snake_case)
# verify the logits
UpperCAmelCase_ = (1, 1000)
self.assertEqual(outputs.logits.shape , _snake_case)
UpperCAmelCase_ = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4))
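# Hedged inference sketch mirroring the slow test above (checkpoint id taken
# from it):
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   logits = model(**processor(images=prepare_img(), return_tensors="np")).logits
#   predicted_class = int(logits.argmax(-1)[0])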
| 7 | 1 |
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __snake_case :
def __init__( self : int , _snake_case : str , _snake_case : Optional[int]=13 , _snake_case : List[Any]=7 , _snake_case : Optional[int]=True , _snake_case : Optional[int]=True , _snake_case : List[Any]=True , _snake_case : Optional[Any]=True , _snake_case : Any=99 , _snake_case : Dict=64 , _snake_case : Optional[Any]=32 , _snake_case : str=5 , _snake_case : str=4 , _snake_case : Union[str, Any]=37 , _snake_case : Optional[int]="gelu" , _snake_case : Dict=0.1 , _snake_case : List[str]=0.1 , _snake_case : Dict=512 , _snake_case : Tuple=16 , _snake_case : List[str]=2 , _snake_case : str=0.0_2 , _snake_case : List[Any]=3 , _snake_case : Optional[Any]=4 , _snake_case : str=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = embedding_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : str , _snake_case : Optional[int] , _snake_case : Optional[int] , _snake_case : int , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = MegatronBertModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case , token_type_ids=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def lowerCamelCase ( self : Optional[int] , _snake_case : int , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Dict , _snake_case : Any , _snake_case : int , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = MegatronBertForMaskedLM(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : str , _snake_case : List[Any] , _snake_case : int , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : int , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = MegatronBertForCausalLM(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Dict , _snake_case : int , _snake_case : List[Any] , _snake_case : str , _snake_case : Tuple , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = MegatronBertForNextSentencePrediction(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def lowerCamelCase ( self : Dict , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : str , _snake_case : Optional[int] , _snake_case : Any , _snake_case : Dict , _snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = MegatronBertForPreTraining(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , next_sentence_label=_snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Tuple , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = MegatronBertForQuestionAnswering(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase ( self : int , _snake_case : Tuple , _snake_case : str , _snake_case : Tuple , _snake_case : List[Any] , _snake_case : List[str] , _snake_case : Dict , _snake_case : Any):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MegatronBertForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = MegatronBertForTokenClassification(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase ( self : Optional[int] , _snake_case : List[Any] , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : Optional[int] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.num_choices
UpperCAmelCase_ = MegatronBertForMultipleChoice(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
        ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ) = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : int = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[Any] = (
{
'''feature-extraction''': MegatronBertModel,
'''fill-mask''': MegatronBertForMaskedLM,
'''question-answering''': MegatronBertForQuestionAnswering,
'''text-classification''': MegatronBertForSequenceClassification,
'''text-generation''': MegatronBertForCausalLM,
'''token-classification''': MegatronBertForTokenClassification,
'''zero-shot''': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = True
# test_resize_embeddings = False
UpperCAmelCase__ : str = False
def lowerCamelCase ( self : Optional[int] , _snake_case : Dict , _snake_case : int , _snake_case : List[str]=False):
"""simple docstring"""
UpperCAmelCase_ = super()._prepare_for_class(_snake_case , _snake_case , return_labels=_snake_case)
if return_labels:
if model_class in get_values(_snake_case):
UpperCAmelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_snake_case)
UpperCAmelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case)
return inputs_dict
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = MegatronBertModelTester(self)
UpperCAmelCase_ = ConfigTester(self , config_class=_snake_case , hidden_size=37)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_snake_case)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_snake_case)
def A (__A : int ) -> Any:
"""simple docstring"""
return torch.tensor(
        __A , dtype=torch.long , device=torch_device , )
snake_case_ : int = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
@slow
@unittest.skip('''Model is not available.''')
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = '''nvidia/megatron-bert-uncased-345m'''
if "MYDIR" in os.environ:
UpperCAmelCase_ = os.path.join(os.environ['''MYDIR'''] , _snake_case)
UpperCAmelCase_ = MegatronBertModel.from_pretrained(_snake_case)
model.to(_snake_case)
model.half()
UpperCAmelCase_ = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
with torch.no_grad():
UpperCAmelCase_ = model(_snake_case)[0]
UpperCAmelCase_ = torch.Size((1, 9, 1024))
self.assertEqual(output.shape , _snake_case)
UpperCAmelCase_ = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
for ii in range(3):
for jj in range(3):
UpperCAmelCase_ = output[0, ii, jj]
UpperCAmelCase_ = expected[3 * ii + jj]
UpperCAmelCase_ = '''ii={} jj={} a={} b={}'''.format(_snake_case , _snake_case , _snake_case , _snake_case)
self.assertTrue(math.isclose(_snake_case , _snake_case , rel_tol=_snake_case , abs_tol=_snake_case) , msg=_snake_case)
| 7 |
import comet # From: unbabel-comet
import torch
import datasets
snake_case_ : Tuple = datasets.logging.get_logger(__name__)
snake_case_ : str = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
snake_case_ : Tuple = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
snake_case_ : Optional[int] = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info( self : Any):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence'''),
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
            '''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf''',
] , )
    def _download_and_prepare( self : List[Any] , dl_manager : Optional[int]):
        """simple docstring"""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da'''))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute( self : List[Any] , sources : List[str] , predictions : List[str] , references : List[str] , gpus : int = None , progress_bar : bool = False):
        """simple docstring"""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'''src''': sources, '''mt''': predictions, '''ref''': references}
        data = [dict(zip(data , t)) for t in zip(*data.values())]
        # unbabel-comet 1.x predict() returns (segment_scores, system_score)
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 7 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester :
def __init__( self : List[str] , _snake_case : Union[str, Any] , _snake_case : Optional[Any]=13 , _snake_case : Dict=7 , _snake_case : Union[str, Any]=True , _snake_case : int=True , _snake_case : Optional[Any]=True , _snake_case : Optional[Any]=True , _snake_case : str=99 , _snake_case : Tuple=64 , _snake_case : str=5 , _snake_case : Any=4 , _snake_case : str=37 , _snake_case : Dict="gelu" , _snake_case : Optional[int]=0.1 , _snake_case : Any=0.1 , _snake_case : Tuple=512 , _snake_case : Union[str, Any]=16 , _snake_case : Optional[Any]=2 , _snake_case : Any=0.0_2 , _snake_case : Any=3 , _snake_case : Dict=4 , _snake_case : Any=None , ):
"""simple docstring"""
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = vocab_size - 1
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCamelCase ( self : Dict):
"""simple docstring"""
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
return config, input_ids, input_mask, token_labels
def lowerCamelCase ( self : Union[str, Any] , _snake_case : str , _snake_case : Tuple , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = GPTNeoXModel(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)
UpperCAmelCase_ = model(_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : str , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = True
UpperCAmelCase_ = GPTNeoXModel(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCamelCase ( self : str , _snake_case : int , _snake_case : List[str] , _snake_case : Union[str, Any] , _snake_case : Tuple):
"""simple docstring"""
UpperCAmelCase_ = GPTNeoXForCausalLM(config=_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCamelCase ( self : int , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Optional[Any] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = GPTNeoXForQuestionAnswering(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def lowerCamelCase ( self : Dict , _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = GPTNeoXForSequenceClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCamelCase ( self : int , _snake_case : Optional[int] , _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = GPTNeoXForTokenClassification(_snake_case)
model.to(_snake_case)
model.eval()
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , labels=_snake_case)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def lowerCamelCase ( self : str , _snake_case : Union[str, Any] , _snake_case : List[str] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = True
UpperCAmelCase_ = GPTNeoXForCausalLM(config=_snake_case)
model.to(_snake_case)
model.eval()
# first forward pass
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case)
UpperCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1)
UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1)
UpperCAmelCase_ = model(_snake_case , attention_mask=_snake_case , output_hidden_states=_snake_case)
UpperCAmelCase_ = output_from_no_past['''hidden_states'''][0]
UpperCAmelCase_ = model(
_snake_case , attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['''hidden_states'''][0]
# select random slice
UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1]).item()
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1e-3))
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( a , a , a , unittest.TestCase ):
UpperCAmelCase__ : Dict = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Tuple = False
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self , config_class=GPTNeoXConfig , hidden_size=64 , num_attention_heads=8)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase_ = None
self.model_tester.create_and_check_model_as_decoder(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case)
@unittest.skip(reason='''Feed forward chunking is not implemented''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)])
    def lowerCamelCase ( self : List[Any] , scaling_type : Optional[Any]):
        """simple docstring"""
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'''type''': scaling_type, '''factor''': 1_0.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5))
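# Hedged sketch: the same rope_scaling dict as in the test above, applied when loading a
# pretrained checkpoint. The checkpoint name is taken from the integration test below;
# '''factor''' stretches the usable context by roughly that multiple.
from transformers import GPTNeoXConfig, GPTNeoXForCausalLM

config = GPTNeoXConfig.from_pretrained('''EleutherAI/pythia-410m-deduped''')
config.rope_scaling = {'''type''': '''linear''', '''factor''': 2.0}  # or '''dynamic'''
model = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' , config=config)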
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''')
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''')
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''').to(torch_device)
            # The hub repo. was updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str , expected_output)
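# Hedged standalone sketch of the past_key_values pattern verified in
# create_and_check_decoder_model_past_large_inputs above: a full forward over the
# concatenated sequence should match an incremental forward that reuses the cache
# (the checkpoint name and tolerance are illustrative).
import torch
from transformers import GPTNeoXForCausalLM

model = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''').eval()
prefix = torch.tensor([[1, 2, 3]])
next_tokens = torch.tensor([[4, 5]])
with torch.no_grad():
    past = model(prefix , use_cache=True).past_key_values
    incremental = model(next_tokens , past_key_values=past).logits
    full = model(torch.cat([prefix, next_tokens] , dim=-1)).logits[:, -2:]
assert torch.allclose(incremental , full , atol=1e-4)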
| 7 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( a ):
UpperCAmelCase__ : Optional[int] = (DPMSolverSinglestepScheduler,)
UpperCAmelCase__ : str = (('''num_inference_steps''', 2_5),)
def lowerCamelCase ( self : Dict , **_snake_case : Dict):
"""simple docstring"""
UpperCAmelCase_ = {
'''num_train_timesteps''': 1000,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
'''sample_max_value''': 1.0,
'''algorithm_type''': '''dpmsolver++''',
'''solver_type''': '''midpoint''',
'''lambda_min_clipped''': -float('''inf'''),
'''variance_type''': None,
}
config.update(**_snake_case)
return config
def lowerCamelCase ( self : Dict , _snake_case : int=0 , **_snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config(**_snake_case)
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ , UpperCAmelCase_ = sample, sample
for t in range(_snake_case , time_step + scheduler.config.solver_order + 1):
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
def lowerCamelCase ( self : Tuple , _snake_case : Optional[Any]=0 , **_snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = dict(self.forward_default_kwargs)
UpperCAmelCase_ = kwargs.pop('''num_inference_steps''' , _snake_case)
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_snake_case)
scheduler.set_timesteps(_snake_case)
# copy over dummy past residuals (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_snake_case)
UpperCAmelCase_ = scheduler_class.from_pretrained(_snake_case)
# copy over dummy past residuals
new_scheduler.set_timesteps(_snake_case)
# copy over dummy past residual (must be after setting timesteps)
UpperCAmelCase_ = dummy_past_residuals[: new_scheduler.config.solver_order]
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
UpperCAmelCase_ = new_scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self : Dict , scheduler : int = None , **config : Optional[Any]):
        """simple docstring"""
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample).prev_sample
        return sample
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = 50
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter
scheduler.set_timesteps(_snake_case)
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:]):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_5_7_4) < 1e-3
def lowerCamelCase ( self : int):
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=_snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
UpperCAmelCase_ = DEISMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = UniPCMultistepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = DPMSolverSinglestepScheduler.from_config(scheduler.config)
UpperCAmelCase_ = self.full_loop(scheduler=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(thresholding=_snake_case)
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=_snake_case , prediction_type=_snake_case , sample_max_value=_snake_case , algorithm_type='''dpmsolver++''' , solver_order=_snake_case , solver_type=_snake_case , )
def lowerCamelCase ( self : Dict):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
UpperCAmelCase_ = self.full_loop(
solver_order=_snake_case , solver_type=_snake_case , prediction_type=_snake_case , algorithm_type=_snake_case , )
assert not torch.isnan(_snake_case).any(), "Samples have nan numbers"
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lower_order_final=_snake_case)
self.check_over_configs(lower_order_final=_snake_case)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf'''))
self.check_over_configs(lambda_min_clipped=-5.1)
def lowerCamelCase ( self : int):
"""simple docstring"""
self.check_over_configs(variance_type=_snake_case)
self.check_over_configs(variance_type='''learned_range''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=_snake_case , time_step=0)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop()
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_7_9_1) < 1e-3
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.2_2_4_8) < 1e-3
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''')
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.1_4_5_3) < 1e-3
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=_snake_case)
UpperCAmelCase_ = torch.mean(torch.abs(_snake_case))
assert abs(result_mean.item() - 0.0_6_4_9) < 1e-3
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config(thresholding=_snake_case , dynamic_thresholding_ratio=0)
UpperCAmelCase_ = scheduler_class(**_snake_case)
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter.half()
scheduler.set_timesteps(_snake_case)
for i, t in enumerate(scheduler.timesteps):
UpperCAmelCase_ = model(_snake_case , _snake_case)
UpperCAmelCase_ = scheduler.step(_snake_case , _snake_case , _snake_case).prev_sample
        assert sample.dtype == torch.float16
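# Hedged sketch of the from_config round-trip tested above, as it is typically used to
# swap the scheduler of a diffusers pipeline (the pipeline id is illustrative).
from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler

pipe = DiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''')
pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config)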
| 7 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( a ):
UpperCAmelCase__ : Optional[int] = '''M-CLIP'''
    def __init__( self : Any , transformerDimSize : int = 1024 , imageDimSize : int = 768 , **kwargs : str):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)
class __snake_case ( a ):
UpperCAmelCase__ : Any = MCLIPConfig
def __init__( self : List[str] , _snake_case : str , *_snake_case : Union[str, Any] , **_snake_case : Union[str, Any]):
"""simple docstring"""
super().__init__(_snake_case , *_snake_case , **_snake_case)
UpperCAmelCase_ = XLMRobertaModel(_snake_case)
UpperCAmelCase_ = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims)
    def lowerCamelCase ( self : Optional[Any] , input_ids : Tuple , attention_mask : Optional[Any]):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
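# A self-contained sketch of the attention-masked mean pooling used in the forward above:
# padded positions are zeroed out before averaging over the sequence dimension.
import torch

embs = torch.randn(2 , 7 , 16)                                # (batch, seq, dim) token embeddings
attention_mask = torch.tensor([[1] * 7 , [1] * 4 + [0] * 3])  # second sequence has 3 pad tokens
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 16)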
| 7 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
snake_case_ : List[Any] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["DeiTFeatureExtractor"]
snake_case_ : List[str] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : int = logging.get_logger(__name__)
def A (__A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
UpperCAmelCase_ = [144, 192, 240]
UpperCAmelCase_ = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
UpperCAmelCase_ = [96, 120, 144]
UpperCAmelCase_ = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
UpperCAmelCase_ = [64, 80, 96]
UpperCAmelCase_ = [16, 16, 24, 48, 64, 80, 320]
UpperCAmelCase_ = 0.05
UpperCAmelCase_ = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
UpperCAmelCase_ = 512
UpperCAmelCase_ = 16
UpperCAmelCase_ = 21
UpperCAmelCase_ = '''pascal-voc-id2label.json'''
else:
UpperCAmelCase_ = 1000
UpperCAmelCase_ = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ = '''huggingface/label-files'''
UpperCAmelCase_ = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase_ = {int(__A ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
return config
def A (__A : Any , __A : List[Any]=False ) -> Union[str, Any]:
"""simple docstring"""
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
UpperCAmelCase_ = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
UpperCAmelCase_ = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
UpperCAmelCase_ = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
UpperCAmelCase_ = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
UpperCAmelCase_ = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
UpperCAmelCase_ = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
UpperCAmelCase_ = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
UpperCAmelCase_ = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
UpperCAmelCase_ = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
UpperCAmelCase_ = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
UpperCAmelCase_ = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
UpperCAmelCase_ = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
UpperCAmelCase_ = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
UpperCAmelCase_ = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
UpperCAmelCase_ = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
UpperCAmelCase_ = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
UpperCAmelCase_ = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
UpperCAmelCase_ = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
UpperCAmelCase_ = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
UpperCAmelCase_ = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
UpperCAmelCase_ = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
UpperCAmelCase_ = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
UpperCAmelCase_ = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
UpperCAmelCase_ = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
UpperCAmelCase_ = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
UpperCAmelCase_ = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
UpperCAmelCase_ = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
UpperCAmelCase_ = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
UpperCAmelCase_ = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
UpperCAmelCase_ = '''mobilevit.''' + name
return name
def A (__A : Optional[int] , __A : Optional[int] , __A : Optional[Any]=False ) -> Optional[Any]:
"""simple docstring"""
if base_model:
UpperCAmelCase_ = ''''''
else:
UpperCAmelCase_ = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(__A )
if key[:8] == "encoder.":
UpperCAmelCase_ = key[8:]
if "qkv" in key:
UpperCAmelCase_ = key.split('''.''' )
UpperCAmelCase_ = int(key_split[0][6:] ) - 1
UpperCAmelCase_ = int(key_split[3] )
UpperCAmelCase_ = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
UpperCAmelCase_ = layer.transformer.layer[transformer_num].attention.attention.all_head_size
UpperCAmelCase_ = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
else:
UpperCAmelCase_ = val
return orig_state_dict
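# A self-contained sketch of the fused-QKV split performed in convert_state_dict above:
# the original checkpoint stores one (3*dim, dim) projection that is cut into q, k, v.
import torch

dim = 4
qkv_weight = torch.randn(3 * dim , dim)  # fused [q; k; v] projection from the checkpoint
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]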
def A () -> int:
"""simple docstring"""
UpperCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def A (__A : Any , __A : List[str] , __A : Optional[Any] , __A : int=False ) -> str:
"""simple docstring"""
UpperCAmelCase_ = get_mobilevit_config(__A )
# load original state_dict
UpperCAmelCase_ = torch.load(__A , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
UpperCAmelCase_ = MobileViTForSemanticSegmentation(__A ).eval()
else:
UpperCAmelCase_ = MobileViTForImageClassification(__A ).eval()
UpperCAmelCase_ = convert_state_dict(__A , __A )
model.load_state_dict(__A )
# Check outputs on an image, prepared by MobileViTImageProcessor
UpperCAmelCase_ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
UpperCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCAmelCase_ = model(**__A )
UpperCAmelCase_ = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
UpperCAmelCase_ = torch.tensor(
[
[[6.2_065, 6.1_292, 6.2_070], [6.1_079, 6.1_254, 6.1_747], [6.0_042, 6.1_071, 6.1_034]],
[[-6.9_253, -6.8_653, -7.0_398], [-7.3_218, -7.3_983, -7.3_670], [-7.1_961, -7.2_482, -7.1_569]],
[[-4.4_723, -4.4_348, -4.3_769], [-5.3_629, -5.4_632, -5.4_598], [-5.1_587, -5.3_402, -5.5_059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
UpperCAmelCase_ = torch.tensor(
[
[[5.4_449, 5.5_733, 5.6_314], [5.1_815, 5.3_930, 5.5_963], [5.1_656, 5.4_333, 5.4_853]],
[[-9.4_423, -9.7_766, -9.6_714], [-9.1_581, -9.5_720, -9.5_519], [-9.1_006, -9.6_458, -9.5_703]],
[[-7.7_721, -7.3_716, -7.1_583], [-8.4_599, -8.0_624, -7.7_944], [-8.4_172, -7.8_366, -7.5_025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
UpperCAmelCase_ = torch.tensor(
[
[[6.9_811, 6.9_743, 7.3_123], [7.1_777, 7.1_931, 7.3_938], [7.5_633, 7.8_050, 7.8_901]],
[[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.8_624, -9.5_964], [-10.8_840, -10.8_158, -10.6_659]],
[[-3.4_938, -3.0_631, -2.8_620], [-3.4_205, -2.8_135, -2.6_875], [-3.4_179, -2.7_945, -2.8_750]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , __A , atol=1E-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
UpperCAmelCase_ = torch.tensor([-0.9_866, 0.2_392, -1.1_241] )
elif mobilevit_name == "mobilevit_xs":
UpperCAmelCase_ = torch.tensor([-2.4_761, -0.9_399, -1.9_587] )
elif mobilevit_name == "mobilevit_xxs":
UpperCAmelCase_ = torch.tensor([-1.9_364, -1.2_327, -0.4_653] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , __A , atol=1E-4 )
Path(__A ).mkdir(exist_ok=__A )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__A )
if push_to_hub:
UpperCAmelCase_ = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
UpperCAmelCase_ = model_mapping[mobilevit_name]
image_processor.push_to_hub(__A , organization='''apple''' )
model.push_to_hub(__A , organization='''apple''' )
if __name__ == "__main__":
snake_case_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 7 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ : Dict = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
snake_case_ : List[str] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
snake_case_ : List[Any] = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info( self : Tuple):
"""simple docstring"""
if version.parse(scb.__version__) < version.parse('''1.4.12'''):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute( self : Union[str, Any] , predictions : Optional[int] , references : List[Any] , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 7 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : Tuple = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class __snake_case ( a ):
UpperCAmelCase__ : Union[str, Any] = '''bloom'''
UpperCAmelCase__ : Union[str, Any] = ['''past_key_values''']
UpperCAmelCase__ : List[str] = {
'''num_hidden_layers''': '''n_layer''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Optional[Any] , _snake_case : Dict=250880 , _snake_case : str=64 , _snake_case : List[str]=2 , _snake_case : Tuple=8 , _snake_case : Any=1e-5 , _snake_case : List[str]=0.0_2 , _snake_case : Any=True , _snake_case : Dict=1 , _snake_case : List[Any]=2 , _snake_case : Dict=False , _snake_case : Tuple=0.0 , _snake_case : List[str]=0.0 , _snake_case : List[Any]=1 , _snake_case : Any=False , **_snake_case : List[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = vocab_size
# Backward compatibility with n_embed kwarg
UpperCAmelCase_ = kwargs.pop('''n_embed''' , _snake_case)
UpperCAmelCase_ = hidden_size if n_embed is None else n_embed
UpperCAmelCase_ = n_layer
UpperCAmelCase_ = n_head
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = pretraining_tp
UpperCAmelCase_ = apply_residual_connection_post_layernorm
UpperCAmelCase_ = hidden_dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = bos_token_id
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = slow_but_exact
super().__init__(bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case)
class __snake_case ( a ):
UpperCAmelCase__ : Tuple = version.parse('''1.12''' )
def __init__( self : int , _snake_case : PretrainedConfig , _snake_case : str = "default" , _snake_case : List[PatchingSpec] = None , _snake_case : bool = False , ):
"""simple docstring"""
super().__init__(_snake_case , task=_snake_case , patching_specs=_snake_case , use_past=_snake_case)
if not getattr(self._config , '''pad_token_id''' , _snake_case):
# TODO: how to do that better?
UpperCAmelCase_ = 0
@property
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}})
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_snake_case , direction='''inputs''' , inverted_values_shape=_snake_case)
UpperCAmelCase_ = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
UpperCAmelCase_ = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return self._config.n_layer
@property
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
return self._config.n_head
@property
def lowerCamelCase ( self : Any):
"""simple docstring"""
return 1e-3
def lowerCamelCase ( self : Dict , _snake_case : "PreTrainedTokenizer" , _snake_case : int = -1 , _snake_case : int = -1 , _snake_case : bool = False , _snake_case : Optional["TensorType"] = None , ):
"""simple docstring"""
UpperCAmelCase_ = super(_snake_case , self).generate_dummy_inputs(
_snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case)
# We need to order the input in the way they appears in the forward()
UpperCAmelCase_ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
UpperCAmelCase_ , UpperCAmelCase_ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCAmelCase_ = seqlen + 2
UpperCAmelCase_ = self._config.hidden_size // self.num_attention_heads
UpperCAmelCase_ = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCAmelCase_ = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCAmelCase_ = [
(torch.zeros(_snake_case), torch.zeros(_snake_case)) for _ in range(self.num_layers)
]
UpperCAmelCase_ = common_inputs['''attention_mask''']
if self.use_past:
UpperCAmelCase_ = ordered_inputs['''attention_mask'''].dtype
UpperCAmelCase_ = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_snake_case , _snake_case , dtype=_snake_case)] , dim=1)
return ordered_inputs
@property
def lowerCamelCase ( self : int):
"""simple docstring"""
return 13
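# A self-contained sketch of the BLOOM past_key_values layout built in generate_dummy_inputs
# above: keys are (batch * n_head, head_dim, past_len) while values are
# (batch * n_head, past_len, head_dim) — note the dynamic axis sits in different positions.
import torch

batch, n_head, head_dim, past_len = 2, 8, 16, 5
past_key = torch.zeros(batch * n_head , head_dim , past_len)
past_value = torch.zeros(batch * n_head , past_len , head_dim)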
| 7 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
    def setUp( self : Optional[Any]):
        """simple docstring"""
        self.tool = load_tool('''text-to-speech''')
        self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
        UpperCAmelCase_ = self.tool(text='''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
| 7 | 1 |
import sys
def matrix_chain_order(array : list ) -> tuple:
    """Return (matrix, sol): the minimal multiplication costs and the optimal split
    points for the matrix-chain product defined by the dimension array (CLRS-style DP)."""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution : list , i : int , j : int ) -> None:
    """Print the optimal parenthesization for matrices A_i..A_j."""
    if i == j:
        print('''A''' + str(i ) , end=''' ''' )
    else:
        print('''(''' , end=''' ''' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(''')''' , end=''' ''' )
def main() -> None:
    """Run the classic CLRS example chain of six matrices."""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )
    print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
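# For the dimension array above (the classic CLRS example), the program prints:
#   No. of Operation required: 15125
#   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )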
| 7 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers : Any = None ) -> Tuple:
    """simple docstring"""
    if subparsers is not None:
        parser = subparsers.add_parser('''env''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help='''The config file to use for the default values in the launching script.''' )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def A (__A : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase_ = torch.__version__
UpperCAmelCase_ = torch.cuda.is_available()
UpperCAmelCase_ = is_xpu_available()
UpperCAmelCase_ = is_npu_available()
UpperCAmelCase_ = '''Not found'''
# Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
UpperCAmelCase_ = load_config_from_file(args.config_file ).to_dict()
UpperCAmelCase_ = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''PyTorch XPU available''': str(__A ),
'''PyTorch NPU available''': str(__A ),
'''System RAM''': F"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
}
if pt_cuda_available:
UpperCAmelCase_ = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
UpperCAmelCase_ = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(__A , __A )
else F"""\t{accelerate_config}"""
)
print(__A )
UpperCAmelCase_ = accelerate_config
return info
def A () -> int:
"""simple docstring"""
UpperCAmelCase_ = env_command_parser()
UpperCAmelCase_ = parser.parse_args()
env_command(__A )
return 0
if __name__ == "__main__":
raise SystemExit(main())
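# Typical invocations (illustrative sketch; the subcommand and flag come from
# the parser defined above, while the module path is an assumption):
#
#   $ accelerate env
#   $ accelerate env --config_file /path/to/config.yaml
#   $ python -m accelerate.commands.env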
| 7 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
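# Standalone usage of the pipeline exercised above (illustrative sketch;
# downloads the tiny checkpoint and requires torch + Pillow):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)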
| 7 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
                models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] ,
                inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') ,
                train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') ,
                inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') ,
                train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') ,
                env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
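# Standalone benchmark run mirroring the arguments used in the tests above
# (illustrative sketch; the boolean values are assumptions, and the tiny
# checkpoint is downloaded on first use):
#
#   from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False)
#   results = PyTorchBenchmark(args).run()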
| 7 |
from timeit import timeit
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
number &= number - 1
result += 1
return result
def A (__A : int ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('''the value of input must not be negative''' )
UpperCAmelCase_ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def A () -> None:
"""simple docstring"""
def do_benchmark(__A : int ) -> None:
UpperCAmelCase_ = '''import __main__ as z'''
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(__A ) = }""" )
        UpperCAmelCase_ = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=setup )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__A ) = }""" )
        UpperCAmelCase_ = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=setup , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(__A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
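# Quick sanity checks (sketch, assuming the names used at the call sites above):
#
#   >>> get_set_bits_count_using_brian_kernighans_algorithm(25)   # 0b11001
#   3
#   >>> get_set_bits_count_using_modulo_operator(37)              # 0b100101
#   3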
| 7 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : List[str] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __snake_case ( unittest.TestCase ):
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = TOKEN
HfFolder.save_token(_snake_case)
@classmethod
def lowerCamelCase ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''')
except HTTPError:
pass
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''test-config''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_snake_case , repo_id='''test-config''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained(F"""{USER}/test-config""")
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37)
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_snake_case , repo_id='''valid_org/test-config-org''' , push_to_hub=_snake_case , use_auth_token=self._token)
UpperCAmelCase_ = BertConfig.from_pretrained('''valid_org/test-config-org''')
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_snake_case , getattr(_snake_case , _snake_case))
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
CustomConfig.register_for_auto_class()
UpperCAmelCase_ = CustomConfig(attribute=42)
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''})
UpperCAmelCase_ = AutoConfig.from_pretrained(F"""{USER}/test-dynamic-config""" , trust_remote_code=_snake_case)
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''')
self.assertEqual(new_config.attribute , 42)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
UpperCAmelCase_ = c.n_embd + 1 # int
UpperCAmelCase_ = c.resid_pdrop + 1.0 # float
UpperCAmelCase_ = not c.scale_attn_weights # bool
UpperCAmelCase_ = c.summary_type + '''foo''' # str
c.update_from_string(
F"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""")
self.assertEqual(_snake_case , c.n_embd , '''mismatch for key: n_embd''')
self.assertEqual(_snake_case , c.resid_pdrop , '''mismatch for key: resid_pdrop''')
self.assertEqual(_snake_case , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''')
self.assertEqual(_snake_case , c.summary_type , '''mismatch for key: summary_type''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = PretrainedConfig()
UpperCAmelCase_ = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_snake_case , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''])
UpperCAmelCase_ = [key for key, value in config_common_kwargs.items() if value == getattr(_snake_case , _snake_case)]
if len(_snake_case) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
F""" {", ".join(_snake_case)}.""")
def lowerCamelCase ( self : str):
"""simple docstring"""
with self.assertRaises(_snake_case):
# config is in subfolder, the following should not work without specifying the subfolder
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''')
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''')
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
UpperCAmelCase_ = mock.Mock()
UpperCAmelCase_ = 500
UpperCAmelCase_ = {}
UpperCAmelCase_ = HTTPError
UpperCAmelCase_ = {}
# Download this model to make sure it's in the cache.
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_snake_case) as mock_head:
UpperCAmelCase_ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''')
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''')
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = AutoConfig.from_pretrained('''bert-base-cased''')
UpperCAmelCase_ = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_snake_case)
UpperCAmelCase_ = 2
json.dump(configuration.to_dict() , open(os.path.join(_snake_case , '''config.4.0.0.json''') , '''w'''))
# This should pick the new configuration file as the version of Transformers is > 4.0.0
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
UpperCAmelCase_ = ['''config.42.0.0.json''']
UpperCAmelCase_ = 768
configuration.save_pretrained(_snake_case)
shutil.move(os.path.join(_snake_case , '''config.4.0.0.json''') , os.path.join(_snake_case , '''config.42.0.0.json'''))
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
self.assertEqual(new_configuration.hidden_size , 768)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
UpperCAmelCase_ = '''v4.0.0'''
UpperCAmelCase_ , UpperCAmelCase_ = new_transformers.models.auto.AutoConfig.from_pretrained(
_snake_case , return_unused_kwargs=_snake_case)
self.assertEqual(new_configuration.hidden_size , 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_snake_case , {})
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
UpperCAmelCase_ = '''v3.0.0'''
UpperCAmelCase_ = old_transformers.models.auto.AutoConfig.from_pretrained(_snake_case)
self.assertEqual(old_configuration.hidden_size , 768)
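# Standalone use of `update_from_string`, the mechanism exercised in the test
# above (illustrative sketch; the concrete values are assumptions):
#
#   from transformers import GPT2Config
#   c = GPT2Config()
#   c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")
#   assert c.n_embd == 10 and c.scale_attn_weights is False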
| 7 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = 10
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4]
UpperCAmelCase_ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCAmelCase_ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_snake_case , self.block_size , 0) , _snake_case)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
self.assertEqual(_snake_case , [])
self.assertEqual(_snake_case , [])
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
UpperCAmelCase_ , UpperCAmelCase_ = process_story(_snake_case)
UpperCAmelCase_ = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(_snake_case , _snake_case)
UpperCAmelCase_ = ['''It was the best of times.''']
self.assertEqual(_snake_case , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(_snake_case , 0).numpy() , expected.numpy())
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([1, 2, 3, 4, 23, 23, 23])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 23).numpy() , expected.numpy())
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = torch.tensor([8, 2, 3, 4, 1, 1, 1])
UpperCAmelCase_ = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(_snake_case , 1).numpy() , expected.numpy())
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = 101
UpperCAmelCase_ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
UpperCAmelCase_ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
UpperCAmelCase_ = compute_token_type_ids(_snake_case , _snake_case)
np.testing.assert_array_equal(_snake_case , _snake_case)
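# The padding-mask behaviour verified above, as a doctest-style sketch:
#
#   >>> build_mask(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23)
#   tensor([1, 1, 1, 1, 0, 0, 0])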
| 7 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
snake_case_ : List[str] = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
snake_case_ : Optional[Any] = "</w>"
snake_case_ : Optional[int] = "@@ "
def A (__A : Any ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase_ = set()
UpperCAmelCase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_ = char
return pairs
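# Example (sketch): for a word held as a tuple of symbols,
#   get_pairs(("l", "o", "w</w>")) -> {("l", "o"), ("o", "w</w>")}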
# Speech2Text2 has no max input length
snake_case_ : str = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class __snake_case ( a ):
UpperCAmelCase__ : int = VOCAB_FILES_NAMES
UpperCAmelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Optional[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , _snake_case : Optional[int] , _snake_case : Tuple="<s>" , _snake_case : int="<pad>" , _snake_case : Tuple="</s>" , _snake_case : int="<unk>" , _snake_case : Optional[Any]=False , _snake_case : Optional[int]=None , **_snake_case : Optional[int] , ):
"""simple docstring"""
super().__init__(
unk_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , pad_token=_snake_case , do_lower_case=_snake_case , **_snake_case , )
UpperCAmelCase_ = do_lower_case
with open(_snake_case , encoding='''utf-8''') as vocab_handle:
UpperCAmelCase_ = json.load(_snake_case)
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
UpperCAmelCase_ = None
UpperCAmelCase_ = None
else:
with open(_snake_case , encoding='''utf-8''') as merges_handle:
UpperCAmelCase_ = merges_handle.read().split('''\n''')[:-1]
UpperCAmelCase_ = [tuple(merge.split()[:2]) for merge in merges]
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = {}
@property
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
return len(self.decoder)
def lowerCamelCase ( self : Any):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def lowerCamelCase ( self : List[Any] , _snake_case : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
UpperCAmelCase_ = get_pairs(_snake_case)
if not pairs:
return token
while True:
UpperCAmelCase_ = min(_snake_case , key=lambda _snake_case: self.bpe_ranks.get(_snake_case , float('''inf''')))
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_ = bigram
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
while i < len(_snake_case):
try:
UpperCAmelCase_ = word.index(_snake_case , _snake_case)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
UpperCAmelCase_ = j
if word[i] == first and i < len(_snake_case) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
UpperCAmelCase_ = tuple(_snake_case)
UpperCAmelCase_ = new_word
if len(_snake_case) == 1:
break
else:
UpperCAmelCase_ = get_pairs(_snake_case)
UpperCAmelCase_ = ''' '''.join(_snake_case)
if word == "\n " + BPE_TOKEN_MERGES:
UpperCAmelCase_ = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(_snake_case):
UpperCAmelCase_ = word.replace(_snake_case , '''''')
UpperCAmelCase_ = word.replace(''' ''' , _snake_case)
UpperCAmelCase_ = word
return word
def lowerCamelCase ( self : int , _snake_case : Union[str, Any]):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
                ''' Make sure to provide a `merges.txt` file at instantiation to enable '''
'''encoding.''')
if self.do_lower_case:
UpperCAmelCase_ = text.lower()
UpperCAmelCase_ = text.split()
UpperCAmelCase_ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_snake_case).split(''' ''')))
return split_tokens
def lowerCamelCase ( self : Optional[Any] , _snake_case : str):
"""simple docstring"""
return self.encoder.get(_snake_case , self.encoder.get(self.unk_token))
def lowerCamelCase ( self : List[Any] , _snake_case : int):
"""simple docstring"""
UpperCAmelCase_ = self.decoder.get(_snake_case , self.unk_token)
return result
def lowerCamelCase ( self : Dict , _snake_case : List[str]):
"""simple docstring"""
UpperCAmelCase_ = ''' '''.join(_snake_case)
# make sure @@ tokens are concatenated
UpperCAmelCase_ = ''''''.join(string.split(_snake_case))
return string
def lowerCamelCase ( self : Optional[int] , _snake_case : str , _snake_case : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(_snake_case):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
UpperCAmelCase_ = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
UpperCAmelCase_ = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
with open(_snake_case , '''w''' , encoding='''utf-8''') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_snake_case , ensure_ascii=_snake_case) + '''\n''')
UpperCAmelCase_ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_snake_case , '''w''' , encoding='''utf-8''') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _snake_case: _snake_case[1]):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''')
UpperCAmelCase_ = token_index
writer.write(''' '''.join(_snake_case) + '''\n''')
index += 1
return (vocab_file, merges_file)
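# Decode-only usage of this tokenizer (illustrative sketch; the checkpoint name
# comes from the pretrained maps above, and the public class name is assumed):
#
#   from transformers import Speech2Text2Tokenizer
#   tok = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
#   text = tok.decode(model_generated_ids)  # `model_generated_ids` is hypothetical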
| 7 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
snake_case_ : Optional[Any] = 128022
snake_case_ : Optional[int] = 128028
@require_sentencepiece
class __snake_case ( a , unittest.TestCase ):
UpperCAmelCase__ : List[str] = MaMaaaTokenizer
UpperCAmelCase__ : int = False
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : List[str] = True
def lowerCamelCase ( self : str):
"""simple docstring"""
super().setUp()
UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>''']
UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case))))
UpperCAmelCase_ = Path(self.tmpdirname)
save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file'''])
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file'''])
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname)
def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]):
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = '''</s>'''
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = list(tokenizer.get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''</s>''')
self.assertEqual(vocab_keys[1] , '''<unk>''')
self.assertEqual(vocab_keys[-1] , '''<s>''')
self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = tokenizer.tokenize('''This is a test''')
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case)
self.assertEqual(_snake_case , '''This is a test''')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : Dict = '''facebook/m2m100_418M'''
UpperCAmelCase__ : Dict = [
'''In my opinion, there are two levels of response from the French government.''',
'''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''',
]
UpperCAmelCase__ : Dict = [
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
]
# fmt: off
UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
@classmethod
def lowerCamelCase ( cls : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''')
UpperCAmelCase_ = 1
return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
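# Building translation inputs outside the test harness (illustrative sketch;
# mirrors `_build_translation_inputs` above, and the public class name is assumed):
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("In my opinion, there are two levels of response.", return_tensors="pt")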
| 7 | 1 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __snake_case ( a , a , unittest.TestCase ):
UpperCAmelCase__ : Optional[int] = IFPipeline
UpperCAmelCase__ : str = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
UpperCAmelCase__ : str = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {'''latents'''}
def lowerCamelCase ( self : int):
"""simple docstring"""
return self._get_dummy_components()
def lowerCamelCase ( self : Optional[Any] , _snake_case : str , _snake_case : List[str]=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def lowerCamelCase ( self : Dict):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
self._test_save_load_local()
def lowerCamelCase ( self : Any):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa)
UpperCAmelCase_ = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=_snake_case , tokenizer=_snake_case)
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''')
UpperCAmelCase_ , UpperCAmelCase_ = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''')
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase_ = None
UpperCAmelCase_ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if(_snake_case , _snake_case , _snake_case , _snake_case)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase_ = IFImgaImgPipeline(**pipe_a.components)
UpperCAmelCase_ = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_imgaimg(_snake_case , _snake_case , _snake_case , _snake_case)
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase_ = IFInpaintingPipeline(**pipe_a.components)
UpperCAmelCase_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
self._test_if_inpainting(_snake_case , _snake_case , _snake_case , _snake_case)
def lowerCamelCase ( self : List[Any] , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Dict):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def lowerCamelCase ( self : Optional[int] , _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Union[str, Any]):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def lowerCamelCase ( self : Dict , _snake_case : Optional[Any] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : List[Any]):
"""simple docstring"""
_start_torch_memory_measurement()
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1)).to(_snake_case)
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , num_inference_steps=2 , generator=_snake_case , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase_ = torch.Generator(device='''cpu''').manual_seed(0)
UpperCAmelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0)).to(_snake_case)
UpperCAmelCase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1)).to(_snake_case)
UpperCAmelCase_ = pipe_a(
prompt_embeds=_snake_case , negative_prompt_embeds=_snake_case , image=_snake_case , mask_image=_snake_case , original_image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase_ = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''')
assert_mean_pixel_difference(_snake_case , _snake_case)
def A () -> Dict:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
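# End-to-end run of the first-stage pipeline under test (illustrative sketch;
# needs a CUDA GPU and downloads the DeepFloyd checkpoint referenced above):
#
#   import torch
#   pipe = IFPipeline.from_pretrained(
#       "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
#   pipe.enable_model_cpu_offload()
#   image = pipe(prompt="anime turtle", output_type="pil").images[0]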
| 7 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a )
class __snake_case ( a ):
def __init__( self : Tuple , *_snake_case : List[Any] , **_snake_case : Optional[Any]):
"""simple docstring"""
super().__init__(*_snake_case , **_snake_case)
self.check_model_type(_snake_case)
def lowerCamelCase ( self : List[str] , _snake_case : Optional[int]=None , _snake_case : Optional[Any]=None , _snake_case : str=None , **_snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ = {}, {}
if padding is not None:
UpperCAmelCase_ = padding
if truncation is not None:
UpperCAmelCase_ = truncation
if top_k is not None:
UpperCAmelCase_ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , _snake_case : Union["Image.Image", str] , _snake_case : str = None , **_snake_case : str):
"""simple docstring"""
if isinstance(_snake_case , (Image.Image, str)) and isinstance(_snake_case , _snake_case):
UpperCAmelCase_ = {'''image''': image, '''question''': question}
else:
UpperCAmelCase_ = image
UpperCAmelCase_ = super().__call__(_snake_case , **_snake_case)
return results
def lowerCamelCase ( self : Union[str, Any] , _snake_case : int , _snake_case : Optional[int]=False , _snake_case : int=False):
"""simple docstring"""
UpperCAmelCase_ = load_image(inputs['''image'''])
UpperCAmelCase_ = self.tokenizer(
inputs['''question'''] , return_tensors=self.framework , padding=_snake_case , truncation=_snake_case)
UpperCAmelCase_ = self.image_processor(images=_snake_case , return_tensors=self.framework)
model_inputs.update(_snake_case)
return model_inputs
def lowerCamelCase ( self : List[Any] , _snake_case : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = self.model(**_snake_case)
return model_outputs
def lowerCamelCase ( self : str , _snake_case : Optional[Any] , _snake_case : List[str]=5):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCAmelCase_ = self.model.config.num_labels
if self.framework == "pt":
UpperCAmelCase_ = model_outputs.logits.sigmoid()[0]
UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(_snake_case)
else:
raise ValueError(F"""Unsupported framework: {self.framework}""")
UpperCAmelCase_ = scores.tolist()
UpperCAmelCase_ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_snake_case , _snake_case)]
| 7 | 1 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def A (__A : Namespace ) -> Dict:
"""simple docstring"""
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
snake_case_ : int = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class __snake_case ( a ):
@staticmethod
def lowerCamelCase ( _snake_case : ArgumentParser):
"""simple docstring"""
UpperCAmelCase_ = parser.add_parser(
'''convert''' , help='''CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_snake_case , required=_snake_case , help='''Model\'s type.''')
train_parser.add_argument(
'''--tf_checkpoint''' , type=_snake_case , required=_snake_case , help='''TensorFlow checkpoint path or folder.''')
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_snake_case , required=_snake_case , help='''Path to the PyTorch saved model output.''')
train_parser.add_argument('''--config''' , type=_snake_case , default='''''' , help='''Configuration file path or folder.''')
train_parser.add_argument(
'''--finetuning_task_name''' , type=_snake_case , default=_snake_case , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_snake_case)
def __init__( self : Optional[int] , _snake_case : str , _snake_case : str , _snake_case : str , _snake_case : str , _snake_case : str , *_snake_case : List[Any] , ):
"""simple docstring"""
UpperCAmelCase_ = logging.get_logger('''transformers-cli/converting''')
self._logger.info(F"""Loading model {model_type}""")
UpperCAmelCase_ = model_type
UpperCAmelCase_ = tf_checkpoint
UpperCAmelCase_ = pytorch_dump_output
UpperCAmelCase_ = config
UpperCAmelCase_ = finetuning_task_name
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_snake_case)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_snake_case)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_snake_case)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_snake_case)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_snake_case)
if "ckpt" in self._tf_checkpoint.lower():
UpperCAmelCase_ = self._tf_checkpoint
UpperCAmelCase_ = ''''''
else:
UpperCAmelCase_ = self._tf_checkpoint
UpperCAmelCase_ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_snake_case , self._config , self._pytorch_dump_output , _snake_case)
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_snake_case)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_snake_case)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
raise ValueError(
'''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''')
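# A hedged sketch of driving the convert command programmatically; the paths are
# placeholders, and the commented call mirrors the original (pre-renaming) API, in which
# the factory at the top of this sample returns a ConvertCommand exposing a `run()` method.
from argparse import Namespace

args = Namespace(
    model_type="bert",
    tf_checkpoint="./bert_model.ckpt",
    pytorch_dump_output="./pytorch_model.bin",
    config="./bert_config.json",
    finetuning_task_name=None,
)
# ConvertCommand(args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name).run()
# equivalent to: transformers-cli convert --model_type bert --tf_checkpoint ./bert_model.ckpt ...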
| 7 |
import sys
def A (__A : int ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ = len(__A )
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
UpperCAmelCase_ = [[0 for x in range(__A )] for x in range(__A )]
for chain_length in range(2 , __A ):
for a in range(1 , n - chain_length + 1 ):
UpperCAmelCase_ = a + chain_length - 1
UpperCAmelCase_ = sys.maxsize
for c in range(__A , __A ):
UpperCAmelCase_ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
UpperCAmelCase_ = cost
UpperCAmelCase_ = c
return matrix, sol
def A (__A : Any , __A : Dict , __A : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if i == j:
print('''A''' + str(__A ) , end=''' ''' )
else:
print('''(''' , end=''' ''' )
print_optimal_solution(__A , __A , optimal_solution[i][j] )
print_optimal_solution(__A , optimal_solution[i][j] + 1 , __A )
print(''')''' , end=''' ''' )
def A () -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = [30, 35, 15, 5, 10, 20, 25]
UpperCAmelCase_ = len(__A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
UpperCAmelCase_ , UpperCAmelCase_ = matrix_chain_order(__A )
print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
print_optimal_solution(__A , 1 , n - 1 )
if __name__ == "__main__":
main()
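# A self-contained sketch of the same O(n^3) dynamic program as above; for the classic
# CLRS dimensions used in main(), the minimum number of scalar multiplications is 15125.
import sys

def matrix_chain_min_cost(dims):
    n = len(dims)
    cost = [[0] * n for _ in range(n)]
    for length in range(2, n):
        for i in range(1, n - length + 1):
            j = i + length - 1
            cost[i][j] = sys.maxsize
            for k in range(i, j):
                # cost of multiplying A_i..A_k, A_{k+1}..A_j, then the two results
                q = cost[i][k] + cost[k + 1][j] + dims[i - 1] * dims[k] * dims[j]
                cost[i][j] = min(cost[i][j], q)
    return cost[1][n - 1]

assert matrix_chain_min_cost([30, 35, 15, 5, 10, 20, 25]) == 15125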
| 7 | 1 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def A (__A : np.ndarray , __A : np.ndarray ) -> float:
"""simple docstring"""
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__A , __A ) ) )
def A (__A : np.ndarray , __A : np.ndarray ) -> list[list[list[float] | float]]:
"""simple docstring"""
if dataset.ndim != value_array.ndim:
UpperCAmelCase_ = (
'''Wrong input data\'s dimensions... '''
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__A )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase_ = (
'''Wrong input data\'s shape... '''
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__A )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
UpperCAmelCase_ = (
'''Input data have different datatype... '''
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__A )
UpperCAmelCase_ = []
for value in value_array:
UpperCAmelCase_ = euclidean(__A , dataset[0] )
UpperCAmelCase_ = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase_ = euclidean(__A , __A )
if dist > temp_dist:
UpperCAmelCase_ = temp_dist
UpperCAmelCase_ = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def A (__A : np.ndarray , __A : np.ndarray ) -> float:
"""simple docstring"""
return np.dot(__A , __A ) / (norm(__A ) * norm(__A ))
if __name__ == "__main__":
import doctest
doctest.testmod()
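# A small usage sketch for the helpers above (both renamed `A` by the transformation):
# brute-force nearest neighbour under Euclidean distance, plus cosine similarity.
import numpy as np
from numpy.linalg import norm

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
query = np.array([0.9, 1.1])

nearest = min(dataset, key=lambda row: norm(row - query))  # -> array([1., 1.])
cosine = float(np.dot(dataset[1], query) / (norm(dataset[1]) * norm(query)))
print(nearest, cosine)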
| 7 |
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
snake_case_ : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
snake_case_ : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
snake_case_ : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
snake_case_ : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (even though we don't have a training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (even though we don't have a training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` gets the default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A (__A : List[Any] , __A : Optional[int] , __A : str , __A : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"""config.{attribute}""" in modeling_source
or F"""getattr(config, \"{attribute}\"""" in modeling_source
or F"""getattr(self.config, \"{attribute}\"""" in modeling_source
):
UpperCAmelCase_ = True
# Deal with multi-line cases
elif (
re.search(
RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , __A , )
is not None
):
UpperCAmelCase_ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
UpperCAmelCase_ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
UpperCAmelCase_ = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
UpperCAmelCase_ = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
UpperCAmelCase_ = True
if not attribute_used:
UpperCAmelCase_ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
UpperCAmelCase_ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
UpperCAmelCase_ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
UpperCAmelCase_ = True
elif attribute.endswith('''_token_id''' ):
UpperCAmelCase_ = True
# configuration class specific cases
if not case_allowed:
UpperCAmelCase_ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
UpperCAmelCase_ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A (__A : Tuple ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ = dict(inspect.signature(config_class.__init__ ).parameters )
UpperCAmelCase_ = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']]
UpperCAmelCase_ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
UpperCAmelCase_ = {}
if len(config_class.attribute_map ) > 0:
UpperCAmelCase_ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
UpperCAmelCase_ = inspect.getsourcefile(__A )
UpperCAmelCase_ = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
UpperCAmelCase_ = [os.path.join(__A , __A ) for fn in os.listdir(__A ) if fn.startswith('''modeling_''' )]
# Get the source code strings
UpperCAmelCase_ = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
UpperCAmelCase_ = []
for config_param, default_value in zip(__A , __A ):
# `attributes` here is all the variant names for `config_param`
UpperCAmelCase_ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A , __A , __A , __A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def A () -> Any:
"""simple docstring"""
UpperCAmelCase_ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
UpperCAmelCase_ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda __A : inspect.isclass(__A )
and issubclass(__A , __A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
UpperCAmelCase_ = check_config_attributes_being_used(__A )
if len(__A ) > 0:
UpperCAmelCase_ = unused_attributes
if len(__A ) > 0:
UpperCAmelCase_ = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F"""{name}: {attributes}\n"""
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
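# A distilled sketch of the usage test above: an attribute counts as "used" when the
# modeling source references `config.<attr>` directly, via a single-line `getattr`, or
# via the multi-line `getattr` form matched by the regex.
import re

def attribute_is_used(attribute: str, source: str) -> bool:
    if f"config.{attribute}" in source or f'getattr(config, "{attribute}"' in source:
        return True
    pattern = rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"'
    return re.search(pattern, source) is not None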
| 7 | 1 |