| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87-55.2k | int64 0-349 | stringlengths 135-49.1k | int64 0-349 | int64 0-1 |
from __future__ import annotations

from itertools import permutations
from random import randint
from timeit import repeat


def make_dataset() -> tuple[list[int], int]:
    """Create a random array of 10 ints and a random target sum."""
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array (O(n^3))."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort the array, then run two pointers for each anchor element (O(n^2))."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code1 = """
triplet_sum1(*dataset)
"""
    test_code2 = """
triplet_sum2(*dataset)
"""
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
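# Illustrative call (not part of the original timing setup): both strategies
# return the same sorted triplet when one exists.
#   triplet_sum1([13, 29, 7, 23, 5], 35)  -> (5, 7, 23)   # O(n^3) permutations
#   triplet_sum2([13, 29, 7, 23, 5], 35)  -> (5, 7, 23)   # O(n^2) two pointers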
| code_codestyle: 101 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: repeatedly replace (a, b) with (b, a % b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
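# Sanity check: both implementations agree on larger inputs, e.g.
#   euclidean_gcd(252, 105) == euclidean_gcd_recursive(252, 105) == 21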
| style_context_codestyle: 327 | label: 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =KandinskyInpaintPipeline
lowerCamelCase__ =['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
lowerCamelCase__ =[
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
lowerCamelCase__ =[
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ =False
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1_00
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Any = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__snake_case : List[Any] = MultilingualCLIP(a_ )
__snake_case : Any = text_encoder.eval()
return text_encoder
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Union[str, Any] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__snake_case : Union[str, Any] = UNetaDConditionModel(**a_ )
return model
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.dummy_text_encoder
__snake_case : Union[str, Any] = self.dummy_tokenizer
__snake_case : Union[str, Any] = self.dummy_unet
__snake_case : List[str] = self.dummy_movq
__snake_case : Optional[int] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=a_ , set_alpha_to_one=a_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=a_ , )
__snake_case : Tuple = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ):
'''simple docstring'''
__snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a_ ) ).to(a_ )
__snake_case : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a_ )
# create init_image
__snake_case : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a_ ) ).to(a_ )
__snake_case : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case : Union[str, Any] = Image.fromarray(np.uinta(a_ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
# create mask
__snake_case : List[str] = np.ones((64, 64) , dtype=np.floataa )
__snake_case : Any = 0
if str(a_ ).startswith('''mps''' ):
__snake_case : Optional[Any] = torch.manual_seed(a_ )
else:
__snake_case : Any = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = '''cpu'''
__snake_case : List[Any] = self.get_dummy_components()
__snake_case : List[Any] = self.pipeline_class(**a_ )
__snake_case : Optional[Any] = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(a_ ) )
__snake_case : Any = output.images
__snake_case : str = pipe(
**self.get_dummy_inputs(a_ ) , return_dict=a_ , )[0]
__snake_case : str = image[0, -3:, -3:, -1]
__snake_case : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__snake_case : Dict = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
__snake_case : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__snake_case : Any = np.ones((7_68, 7_68) , dtype=np.floataa )
__snake_case : Optional[Any] = 0
__snake_case : Tuple = '''a hat'''
__snake_case : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(a_ )
__snake_case : List[Any] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
__snake_case : List[str] = pipeline.to(a_ )
pipeline.set_progress_bar_config(disable=a_ )
__snake_case : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case , __snake_case : List[Any] = pipe_prior(
a_ , generator=a_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__snake_case : Dict = pipeline(
a_ , image=a_ , mask_image=a_ , image_embeds=a_ , negative_image_embeds=a_ , generator=a_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='''np''' , )
__snake_case : Union[str, Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(a_ , a_ )
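# A minimal way to exercise the fast test above, assuming the standard
# diffusers test layout (the path is an assumption):
#   pytest tests/pipelines/kandinsky/test_kandinsky_inpaint.py -k inpaint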
| code_codestyle: 102 |
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
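# Hedged usage sketch (not part of the module above): how these helpers are
# typically driven from user code. The Accelerator wiring and directory name
# are illustrative assumptions; in practice accelerator.save_state() and
# accelerator.load_state() call these functions internally.
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer = accelerator.prepare(model, optimizer)
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)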
| style_context_codestyle: 327 | label: 0 |
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
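# Note on the expected tensors above: compute_token_type_ids flips the segment
# id each time the separator token is encountered, starting from 1, e.g.
# [1, 101, 3, 4, 101, 6] with separator 101 yields [1, 0, 0, 0, 1, 1].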
| code_codestyle: 103 |
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
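# Worked example of get_expected_values (shortest_edge=288, size_divisor=32):
# a 600x400 (w x h) PIL image gives scale = 288/400 = 0.72, so (newh, neww) =
# (288, 432); the max side 432 is under the cap int(1333/800 * 288) = 479, and
# flooring to multiples of 32 yields an expected size of (288, 416).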
| style_context_codestyle: 327 | label: 0 |
# Lint as: python3
"""Utilities for file names."""

import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
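# Round-trip illustration (follows directly from the regexes above):
#   camelcase_to_snakecase("SquadV2")  -> "squad_v2"
#   snakecase_to_camelcase("squad_v2") -> "SquadV2"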
| code_codestyle: 104 |
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
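# `get_duration` is imported from the local benchmark utils and is not shown
# here; a minimal stand-in consistent with how it is used above (the decorated
# call returns a wall-clock duration) could look like the sketch below. This is
# an assumption for illustration, not the actual implementation.
#
#   import functools
#   import time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           func(*args, **kwargs)
#           return time.time() - start
#       return wrapper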
| style_context_codestyle: 327 | label: 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __UpperCamelCase :
lowerCamelCase : List[str]
lowerCamelCase : Optional[str] =None
# Automatically constructed
lowerCamelCase : ClassVar[str] ="dict"
lowerCamelCase : ClassVar[Any] =None
lowerCamelCase : str =field(default="""Translation""" , init=a__ , repr=a__ )
def __call__( self ) -> Optional[Any]:
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __a ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class __UpperCamelCase :
lowerCamelCase : Optional[List] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[str] =None
# Automatically constructed
lowerCamelCase : ClassVar[str] ="dict"
lowerCamelCase : ClassVar[Any] =None
lowerCamelCase : str =field(default="""TranslationVariableLanguages""" , init=a__ , repr=a__ )
def __a ( self ) -> Optional[Any]:
a : str = sorted(set(self.languages ) ) if self.languages else None
a : Tuple = len(self.languages ) if self.languages else None
def __call__( self ) -> Any:
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
a : Optional[int] = set(self.languages )
if self.languages and set(lowerCAmelCase__ ) - lang_set:
raise ValueError(
f"""Some languages in example ({', '.join(sorted(set(lowerCAmelCase__ ) - lang_set ) )}) are not in valid set ({', '.join(lowerCAmelCase__ )}).""" )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
a : List[str] = []
for lang, text in translation_dict.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
a, a : Union[str, Any] = zip(*sorted(lowerCAmelCase__ ) )
return {"language": languages, "translation": translations}
def __a ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
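# Example of encode_example flattening multiple translations per language
# ((lang, text) pairs are sorted in ascending order before being unzipped):
#   TranslationVariableLanguages(languages=["en", "fr"]).encode_example(
#       {"en": "the cat", "fr": ["le chat", "la chatte"]}
#   )
#   -> {"language": ("en", "fr", "fr"), "translation": ("the cat", "la chatte", "le chat")}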
| code_codestyle: 105 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
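# covid_stats() returns the three scraped strings in page order, i.e.
# covid_data(cases=..., deaths=..., recovered=...). Note the XPath is tied to
# worldometers' current markup, so a page-layout change will break the scrape.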
| style_context_codestyle: 327 | label: 0 |
"""simple docstring"""
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[Any] ,lowercase_ : List[str] ,lowercase_ : Any=None ,lowercase_ : Any=None ,lowercase_ : Optional[int]=None ,lowercase_ : Optional[int]="resnet50" ,lowercase_ : str=3 ,lowercase_ : List[str]=3_2 ,lowercase_ : Any=3 ,lowercase_ : Dict=True ,lowercase_ : Union[str, Any]=True ,):
lowerCAmelCase__ : List[str] = parent
lowerCAmelCase__ : List[str] = out_indices if out_indices is not None else [4]
lowerCAmelCase__ : List[str] = stage_names
lowerCAmelCase__ : Optional[int] = out_features
lowerCAmelCase__ : int = backbone
lowerCAmelCase__ : List[Any] = batch_size
lowerCAmelCase__ : str = image_size
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Tuple = use_pretrained_backbone
lowerCAmelCase__ : Dict = is_training
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = self.get_config()
return config, pixel_values
def __lowerCAmelCase ( self : List[str] ):
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def __lowerCAmelCase ( self : int ,lowercase_ : str ,lowercase_ : Dict ):
lowerCAmelCase__ : int = TimmBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(lowercase_ )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 1_4, 1_4) ,)
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = config_and_inputs
lowerCAmelCase__ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class SCREAMING_SNAKE_CASE ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (TimmBackbone,) if is_torch_available() else ()
lowercase__ = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __lowerCAmelCase ( self : str ):
lowerCAmelCase__ : List[Any] = TimmBackboneModelTester(self )
lowerCAmelCase__ : str = ConfigTester(self ,config_class=lowercase_ ,has_text_modality=lowercase_ )
def __lowerCAmelCase ( self : Any ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : List[str] = '''resnet18'''
lowerCAmelCase__ : List[str] = '''microsoft/resnet-18'''
lowerCAmelCase__ : Tuple = AutoBackbone.from_pretrained(lowercase_ ,use_timm_backbone=lowercase_ )
lowerCAmelCase__ : List[Any] = AutoBackbone.from_pretrained(lowercase_ )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
lowerCAmelCase__ : List[str] = AutoBackbone.from_pretrained(lowercase_ ,use_timm_backbone=lowercase_ ,out_indices=[1, 2, 3] )
lowerCAmelCase__ : List[str] = AutoBackbone.from_pretrained(lowercase_ ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def __lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self : Dict ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def __lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : Union[str, Any] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def __lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def __lowerCAmelCase ( self : List[str] ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def __lowerCAmelCase ( self : Optional[int] ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def __lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : List[Any] ):
pass
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ ,lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(lowercase_ )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Optional[Any] = [*signature.parameters.keys()]
lowerCAmelCase__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,lowercase_ )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase__ : List[Any] = self.all_model_classes[0]
lowerCAmelCase__ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
lowerCAmelCase__ : Optional[int] = self._prepare_for_class(lowercase_ ,lowercase_ )
lowerCAmelCase__ : Any = model(**lowercase_ )
lowerCAmelCase__ : List[Any] = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase__ : List[Any] = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase__ : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowercase_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ ,lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Tuple = model(**lowercase_ )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase__ : List[str] = copy.deepcopy(lowercase_ )
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[int] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(**lowercase_ )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase__ : List[str] = copy.deepcopy(lowercase_ )
lowerCAmelCase__ : int = False
lowerCAmelCase__ : Dict = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Any = model(**lowercase_ )
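# The equivalence test above exercises this loading pattern, shown standalone
# for clarity (the checkpoint names are the ones used in the test):
#
#   from transformers import AutoBackbone
#
#   timm_model = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
#   hf_model = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
#   # both expose matching .out_indices, .out_features and .channels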
| code_codestyle: 106 |
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
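# Special-token layout produced by build_inputs_with_special_tokens and
# create_token_type_ids_from_sequences above (BERT-style):
#   single sequence:   [CLS] A [SEP]            token_type_ids: 0 ... 0
#   pair of sequences: [CLS] A [SEP] B [SEP]    token_type_ids: 0 ... 0 1 ... 1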
| style_context_codestyle: 327 | label: 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__lowerCAmelCase : Tuple = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__lowerCAmelCase : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __magic_name__ ( A : str ):
'''simple docstring'''
with open(A, "rb" ) as f:
a = Image.open(A )
return im.convert("RGB" )
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=_UpperCamelCase , metadata={"""help""": """A folder containing the training data."""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=_UpperCamelCase , metadata={"""help""": """A folder containing the validation data."""} )
SCREAMING_SNAKE_CASE_ : Optional[float] = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(_UpperCamelCase )} , )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
SCREAMING_SNAKE_CASE_ : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
SCREAMING_SNAKE_CASE_ : str = field(default=_UpperCamelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
SCREAMING_SNAKE_CASE_ : bool = field(
default=_UpperCamelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
SCREAMING_SNAKE_CASE_ : bool = field(
default=_UpperCamelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __magic_name__ ( A : Dict ):
'''simple docstring'''
a = torch.stack([example["pixel_values"] for example in examples] )
a = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a , a , a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification", A, A )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a = training_args.get_process_log_level()
logger.setLevel(A )
transformers.utils.logging.set_verbosity(A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
a = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None, )
else:
a = {}
if data_args.train_dir is not None:
a = os.path.join(data_args.train_dir, "**" )
if data_args.validation_dir is not None:
a = os.path.join(data_args.validation_dir, "**" )
a = load_dataset(
"imagefolder", data_files=A, cache_dir=model_args.cache_dir, task="image-classification", )
# If we don't have a validation split, split off a percentage of train as validation.
a = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, A ) and data_args.train_val_split > 0.0:
a = dataset["train"].train_test_split(data_args.train_val_split )
a = split["train"]
a = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label
    # Load the accuracy metric from the evaluate package
    metric = evaluate.load("accuracy")
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
a = image_processor.size["shortest_edge"]
else:
a = (image_processor.size["height"], image_processor.size["width"])
a = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
a = Compose(
[
RandomResizedCrop(A ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
a = Compose(
[
Resize(A ),
CenterCrop(A ),
ToTensor(),
normalize,
] )
def train_transforms(A : Dict ):
a = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(A : List[str] ):
a = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer (`collate_fn` is the data collator defined earlier in this script)
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics("train", train_result.metrics )
trainer.save_metrics("train", train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
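# Hedged example invocation (the dataset and checkpoint names below are
# illustrative choices, not mandated by this script):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --model_name_or_path google/vit-base-patch16-224-in21k \
#       --output_dir ./vit-beans \
#       --do_train \
#       --do_eval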
| 107 |
def is_automorphic_number(number: int) -> bool:
    """Return True if number * number ends with the digits of number (e.g. 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
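# Quick examples for the helper above (the descriptive identifiers are this
# edit's renaming of the original obfuscated names):
#   is_automorphic_number(76)  -> True   (76**2 = 5776 ends in "76")
#   is_automorphic_number(7)   -> False  (7**2 = 49 does not end in "7")
#   is_automorphic_number(0)   -> True   (0**2 = 0)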
| 327 | 0 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
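# For orientation, a minimal sketch of the round trip these tests exercise
# (hedged: `out_dir` stands in for any writable pathlib.Path):
#
#     ds = Dataset.from_dict({"col_1": ["a"], "col_2": [1], "col_3": [1.0]})
#     ParquetDatasetWriter(ds, out_dir / "ds.parquet").write()
#     reloaded = ParquetDatasetReader(str(out_dir / "ds.parquet")).read()
#     assert reloaded.column_names == ds.column_names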
| 108 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
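# Effect of the _LazyModule registration above: an import such as
#   from transformers.models.autoformer import AutoformerConfig
# resolves attributes lazily, so the heavy torch-backed modules are only
# imported the first time they are actually accessed.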
| 327 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 109 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: returns the hidden states of the base transformer model."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor (logits or last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
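# Hedged usage sketch (the checkpoint name is illustrative):
#
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test")  # nested lists: [1][n_tokens][hidden_size]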
| 327 | 0 |
"""simple docstring"""
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # `is not None` (rather than truthiness) so a root value of 0 is kept.
        if self.val is not None:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal that appends values into `res`.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build the BST, then collect the values with an in-order traversal.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
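# Complexity note: average O(n log n), but the tree is never rebalanced, so an
# already-sorted input degenerates into a linked list, costing O(n^2)
# comparisons and risking Python's recursion limit on large inputs.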
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 84 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
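# The checks above encode Project Euler problem 43 for a 0-9 pandigital number
# d1..d10: d2d3d4 divisible by 2, d3d4d5 by 3, d4d5d6 by 5, then the windows
# d5d6d7, d6d7d8, d7d8d9 and d8d9d10 by 7, 11, 13 and 17 respectively.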
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 327 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 170 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve for whichever of voltage, current or power is passed as 0."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
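# Worked example with the renamed helper above: a 2.4 W load drawing 0.3 A
# must sit at 2.4 / 0.3 = 8.0 V.
#   electric_power(voltage=0, current=0.3, power=2.4)
#   -> result(name='voltage', value=8.0)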
| 327 | 0 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49_947.875) < 5e-1
        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2_383_808.2) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2_373_516.75) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2_373_516.75) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)
        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 2_347_693.5) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        image_slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        image_slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the
        # `sum` over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(image_slice_eff - image_slice).max() < 1e-2
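# The replicate/shard pattern used throughout these tests is the standard Flax
# data-parallel recipe: parameters are replicated onto every device, inputs and
# PRNG keys are split along a leading device axis, and `jit=True` runs the
# pmapped pipeline once per device.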
| 14 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
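# Rough standalone equivalent of the final comparison (hedged sketch that
# bypasses the datasets plumbing above):
#
#     preds = np.asarray(["the cat", "hat"])
#     refs = np.asarray(["the cat", "hats"])
#     float(np.mean(preds == refs) * 100)  # -> 50.0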
| 327 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Collect decodable single-token strings made of plain ASCII letters/spaces.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
                toks.append((i, tok))
            except UnicodeDecodeError:
                pass
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check: make sure the default model_max_length is not the sentinel used below
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
def lowerCamelCase ( self ):
'''simple docstring'''
        tokenizer = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
pass
def lowerCamelCase ( self ):
'''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
| 330 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , mask_ratio=0.6 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
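        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = 15 ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91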
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
    def test_keyword_and_dict_args(self):
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs_dict = model(inputs , noise=noise )
            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            outputs_keywords = model(**inputs_keywords , noise=noise )
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()
            self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
    def test_numpy_arrays_inputs(self):
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        def prepare_numpy_arrays(inputs_dict ):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v ):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v )
            return inputs_np_dict
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            inputs_np = prepare_numpy_arrays(inputs )
            output_for_dict_input = model(inputs_np , noise=noise )
            output_for_kw_input = model(**inputs , noise=noise )
            self.assert_outputs_same(output_for_dict_input , output_for_kw_input )
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        np.random.seed(2 )
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        tf_noise = tf.constant(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict['noise'] = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
    def test_keras_save_load(self):
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__ ),)
            for module_member_name in dir(module )
            if module_member_name.endswith('MainLayer' )
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
            for module_member in (getattr(module , module_member_name ),)
            if isinstance(module_member , type )
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member , '_keras_serializable' , False )
        }
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        noise = tf.convert_to_tensor(noise )
        inputs_dict.update({'noise': noise} )
        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config )
            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs , outputs=main_layer(symbolic_inputs ) )
            outputs = model(inputs_dict )
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname , 'keras_model.h5' )
                model.save(filepath )
                model = tf.keras.models.load_model(
                    filepath , custom_objects={main_layer_class.__name__: main_layer_class} )
                assert isinstance(model , tf.keras.Model )
                after_outputs = model(inputs_dict )
                self.assert_outputs_same(after_outputs , outputs )
@slow
    def test_save_load(self):
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(inputs , noise=noise )
            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2 )] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2 )] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=False )
                model = model_class.from_pretrained(tmpdirname )
                after_outputs = model(inputs , noise=noise )
                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs['last_hidden_state'].numpy()
                    out_1[np.isnan(out_1 )] = 0
                else:
                    out_1 = after_outputs['logits'].numpy()
                    out_1[np.isnan(out_1 )] = 0
                max_diff = np.amax(np.abs(out_1 - out_2 ) )
                self.assertLessEqual(max_diff , 1E-5 )
    def test_save_load_config(self):
        np.random.seed(2 )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        for model_class in self.all_model_classes:
            model = model_class(config )
            inputs = self._prepare_for_class(inputs_dict , model_class )
            outputs = model(inputs , noise=noise )
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config )
            new_model = model_class.from_config(model.get_config() )
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config )
            _ = new_model(inputs )  # Build model
            new_model.set_weights(model.get_weights() )
            after_outputs = new_model(inputs , noise=noise )
            self.assert_outputs_same(after_outputs , outputs )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 )
| 327 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
lowercase_ = ["note_seq"]
    def __init__( self , *args , **kwargs ):
requires_backends(self , ['note_seq'] )
@classmethod
    def from_config( cls , *args , **kwargs ):
requires_backends(cls , ['note_seq'] )
@classmethod
    def from_pretrained( cls , *args , **kwargs ):
requires_backends(cls , ['note_seq'] ) | 225 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums, max_sum):
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result
def create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum):
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
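# Illustrative output for the inputs above: [3, 4, 2] and [4, 5] are the two
# subsets of [3, 34, 4, 12, 5, 2] that sum to 9.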
| 327 | 0 |
"""simple docstring"""
from math import pi
def arc_length(angle, radius):
    return 2 * pi * radius * (angle / 360)
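# e.g. arc_length(90, 10) = 2 * pi * 10 * 0.25 ≈ 15.708 (a quarter of the circumference)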
if __name__ == "__main__":
print(arc_length(90, 10))
| 266 |
def speed_of_sound_in_a_fluid(density, bulk_modulus):
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
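# Illustrative check (the function name above is a descriptive reconstruction; values
# are approximate): for water, density ≈ 1000 kg/m^3 and bulk modulus ≈ 2.15e9 Pa give
# speed_of_sound_in_a_fluid(1000, 2.15e9) ≈ 1466 m/s.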
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}
class UpperCAmelCase_ ( snake_case_ ):
'''simple docstring'''
a__ = VOCAB_FILES_NAMES
a__ = PRETRAINED_VOCAB_FILES_MAP
a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
def _lowercase ( self : Dict , UpperCamelCase__ : "Conversation" ) -> List[int]:
"""simple docstring"""
__magic_name__ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_A , add_special_tokens=_A ) + [self.eos_token_id] )
if len(_A ) > self.model_max_length:
__magic_name__ = input_ids[-self.model_max_length :]
return input_ids
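# Note on the method above: conversations longer than `model_max_length` are
# truncated from the left, keeping only the most recent turns.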
| 88 |
from math import pi
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
return 2 * pi * radius * (angle / 3_60)
if __name__ == "__main__":
print(arc_length(90, 10))
| 327 | 0 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 249 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize: bool = True , size: Dict[str, int] = None , resample: PILImageResampling = PIL.Image.BICUBIC , do_center_crop: bool = True , crop_size: Dict[str, int] = None , rescale_factor: Union[int, float] = 1 / 255 , do_rescale: bool = True , do_normalize: bool = True , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PIL.Image.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return resize(
            image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images: ImageInput , do_resize: bool = None , size: Dict[str, int] = None , resample=None , do_center_crop: bool = None , crop_size: Dict[str, int] = None , do_rescale: bool = None , rescale_factor: float = None , do_normalize: bool = None , image_mean: Optional[Union[float, List[float]]] = None , image_std: Optional[Union[float, List[float]]] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 327 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0 , vector )
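# np.maximum broadcasts the scalar 0 against the input, clamping negatives to 0 elementwise.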
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 25 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
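# Brute-force approach: scan every window of 13 adjacent digits in the 1000-digit
# number above (the constant from Project Euler problem 8) and keep the largest product.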
def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 327 | 0 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
elif task == "WTQ":
# run_task_main.py hparams
lowercase :Optional[int] = 4
lowercase :Any = True
# hparam_utils.py hparams
lowercase :Optional[int] = 0.664_694
lowercase :Dict = 0.207_951
lowercase :Optional[int] = 0.121_194
lowercase :int = True
lowercase :Union[str, Any] = True
lowercase :Union[str, Any] = False
lowercase :Optional[Any] = 0.0_352_513
        model = TapasForQuestionAnswering(config=config )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
lowercase :int = 4
lowercase :List[str] = False
# hparam_utils.py hparams
lowercase :Tuple = 36.4_519
lowercase :Optional[int] = 0.903_421
lowercase :Tuple = 222.088
lowercase :List[Any] = True
lowercase :Optional[Any] = True
lowercase :str = True
lowercase :Any = 0.763_141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
else:
raise ValueError(F"Task {task} not supported." )
print(F"Building PyTorch model from configuration: {config}" )
# Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path )
# Save pytorch-model (weights and configuration)
print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
# Save tokenizer files
print(F"Save tokenizer files to {pytorch_dump_path}" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
print("Used relative position embeddings:", model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
)
parser.add_argument(
"--reset_position_index_per_cell",
default=False,
action="store_true",
help="Whether to use relative position embeddings or not. Defaults to True.",
)
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--tapas_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained TAPAS model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_UpperCAmelCase : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 236 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__magic_name__: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__magic_name__: Optional[str] = field(
default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__magic_name__: Optional[str] = field(
default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether tp freeze the encoder."} )
__magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
__magic_name__: Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__magic_name__: Optional[int] = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__magic_name__: Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
__magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} )
__magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} )
__magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} )
__magic_name__: bool = field(
default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
check_output_dir(__a )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__a , data_args.task )
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = Seq2SeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=Seq2SeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
    if training_args.do_train:
        logger.info('*** Train ***' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(metric_key_prefix='val' )
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('val' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='test' )
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 327 | 0 |
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
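# Illustrative results for the helpers above (names reconstructed from the comments):
# normalization([2, 4, 6]) -> [0.0, 0.5, 1.0]
# standardization([2, 4, 6]) -> [-1.0, 0.0, 1.0]  (statistics.stdev is the sample stdev)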
| 180 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_poolformer""": [
        """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """PoolFormerConfig""",
        """PoolFormerOnnxConfig""",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""PoolFormerFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
        """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PoolFormerForImageClassification""",
        """PoolFormerModel""",
        """PoolFormerPreTrainedModel""",
    ]
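# `_import_structure` maps each submodule to its public names; the `_LazyModule`
# assignment at the bottom of the file defers the real imports until first attribute access.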
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 327 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
UpperCAmelCase_ :Optional[torch.FloatTensor] = None
UpperCAmelCase_ :torch.FloatTensor = None
UpperCAmelCase_ :Optional[Tuple[torch.FloatTensor]] = None
UpperCAmelCase_ :Optional[Tuple[torch.FloatTensor]] = None
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ) -> int:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
UpperCAmelCase_ :Union[str, Any] = [r"pooler", r"logit_scale"]
UpperCAmelCase_ :int = [r"position_ids", r"predictions.decoder.bias"]
UpperCAmelCase_ :Optional[Any] = "roberta"
UpperCAmelCase_ :Dict = RobertaSeriesConfig
    def __init__( self , config ) -> Any:
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , """has_pre_transformation""" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_attentions=None , output_hidden_states=None , return_dict=None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output = outputs['hidden_states'][-2]
            sequence_output = self.pre_LN(sequence_output )
            projection_state = self.transformation_pre(sequence_output )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 84 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def test_transform_and_reverse(self):
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        inp = tokenizer('This is me' , return_tensors='pt' )
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_from_pretrained ) )
    def test_error_save_pretrained(self):
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
| 327 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef
DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING , FutureWarning)
    requires_backends(simple_accuracy , """sklearn""")
    return (preds == labels).mean()
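# note: `(preds == labels).mean()` relies on NumPy elementwise comparison, so both
# arguments are expected to be NumPy arrays of the same shape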
def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING , FutureWarning)
    requires_backends(acc_and_f1 , """sklearn""")
    acc = simple_accuracy(preds , labels)
    f1 = f1_score(y_true=labels , y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }
def lowerCAmelCase_ ( _lowercase : List[Any] , _lowercase : List[str]) -> Dict:
"""simple docstring"""
warnings.warn(__a , __a)
requires_backends(__a , """sklearn""")
a__ : List[str] = pearsonr(__a , __a)[0]
a__ : Optional[Any] = spearmanr(__a , __a)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def lowerCAmelCase_ ( _lowercase : List[Any] , _lowercase : int , _lowercase : Dict) -> str:
"""simple docstring"""
warnings.warn(__a , __a)
requires_backends(__a , """sklearn""")
assert len(__a) == len(__a), F'''Predictions and labels have mismatched lengths {len(__a)} and {len(__a)}'''
if task_name == "cola":
return {"mcc": matthews_corrcoef(__a , __a)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(__a , __a)}
elif task_name == "mrpc":
return acc_and_fa(__a , __a)
elif task_name == "sts-b":
return pearson_and_spearman(__a , __a)
elif task_name == "qqp":
return acc_and_fa(__a , __a)
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(__a , __a)}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(__a , __a)}
elif task_name == "qnli":
return {"acc": simple_accuracy(__a , __a)}
elif task_name == "rte":
return {"acc": simple_accuracy(__a , __a)}
elif task_name == "wnli":
return {"acc": simple_accuracy(__a , __a)}
elif task_name == "hans":
return {"acc": simple_accuracy(__a , __a)}
else:
raise KeyError(__a)
def lowerCAmelCase_ ( _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Optional[Any]) -> Dict:
"""simple docstring"""
warnings.warn(__a , __a)
requires_backends(__a , """sklearn""")
if len(__a) != len(__a):
raise ValueError(F'''Predictions and labels have mismatched lengths {len(__a)} and {len(__a)}''')
if task_name == "xnli":
return {"acc": simple_accuracy(__a , __a)}
else:
raise KeyError(__a)
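# Illustrative usage sketch (not part of the original module; the arrays below are
# made up for the demo):
#
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)  # -> {"acc": 0.75, "f1": ..., "acc_and_f1": ...}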
| 170 |
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
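# Illustrative usage sketch (an assumption about the surrounding training script,
# not code from this file): `batch_sequences` is designed to act as a DataLoader
# collate_fn, so each batch gets padded on the fly:
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)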
| 327 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCamelCase : int = {
"""configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"""LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongT5EncoderModel""",
"""LongT5ForConditionalGeneration""",
"""LongT5Model""",
"""LongT5PreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = [
"""FlaxLongT5ForConditionalGeneration""",
"""FlaxLongT5Model""",
"""FlaxLongT5PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive method for the Euclidean gcd algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
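# Quick sanity check (not in the original script): both implementations rest on the
# identity gcd(a, b) == gcd(b, a % b), so they must always agree, e.g.
#
#   assert euclidean_gcd(252, 105) == euclidean_gcd_recursive(252, 105) == 21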
| 327 | 0 |
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string into a list of single characters."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
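# Illustrative usage sketch (not part of the original file; the checkpoint name comes
# from this module's own PRETRAINED_VOCAB_FILES_MAP): the tokenizer is character-level,
# so encoding and decoding are per-character vocab lookups.
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tokenizer("hello")["input_ids"]         # one id per character
#   chars = tokenizer.convert_ids_to_tokens(ids)  # back to ["h", "e", "l", "l", "o"]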
| 330 |
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
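# Illustrative usage sketch (an assumption about typical callers: in Accelerate these
# helpers are normally driven indirectly through `accelerator.save_state()` /
# `accelerator.load_state()` rather than called by hand):
#
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt")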
| 327 | 0 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Depth-first search with backtracking: a branch is pruned when the running sum
    # overshoots max_sum or when the remaining numbers can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
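# For the inputs above, the backtracking search finds exactly the two subsets of
# [3, 34, 4, 12, 5, 2] that sum to 9, so the script prints: [3, 4, 2] [4, 5]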
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
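# The expected-shape helper above mirrors BridgeTower's resize rule: scale the short
# side to `size`, cap the long side at int(1333 / 800 * size), then snap both
# dimensions down to multiples of `size_divisor`.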
| 327 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"


class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    # (hook name assumed; the obfuscated source dropped it)
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    # (hook name assumed; the obfuscated source dropped it)
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
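# Illustrative wiring sketch (hypothetical `MyTaskModel` subclass of BaseTransformer;
# this mirrors how example scripts are expected to drive this module):
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = MyTaskModel.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskModel(args), args)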
| 266 |
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
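# Note: `get_duration` (from the local benchmark utils) is assumed to return the
# wall-clock time of the wrapped call, which is why the decorated `map`/`filter`
# return values can be stored in `times` and dumped to JSON as-is.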
| 327 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")
    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024  # attribute name assumed; the obfuscated source only kept the value
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
def _lowercase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ = tokenizer.__class__.from_pretrained(_A )
__magic_name__ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
__magic_name__ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ = tempfile.mkdtemp()
__magic_name__ = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__magic_name__ = chr(0XE007 )
additional_special_tokens.append(_A )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__magic_name__ = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ = tokenizer.__class__.from_pretrained(_A )
__magic_name__ = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn(_A , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def _lowercase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__magic_name__ = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ = self.get_clean_sequence(_A )
# a special token for Canine can be defined as follows:
__magic_name__ = 0XE005
__magic_name__ = chr(_A )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__magic_name__ = tokenizer.encode(_A , add_special_tokens=_A )
self.assertEqual(len(_A ) , 1 )
__magic_name__ = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_A )
__magic_name__ = tokenizer.encode(_A , add_special_tokens=_A )
__magic_name__ = tokenizer.encode(_A , add_special_tokens=_A )
__magic_name__ = tokenizer.encode(_A , add_special_tokens=_A )
self.assertEqual(_A , input_encoded + special_token_id )
__magic_name__ = tokenizer.decode(_A , skip_special_tokens=_A )
self.assertTrue(special_token not in decoded )
def _lowercase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__magic_name__ = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ = chr(0XE005 )
__magic_name__ = chr(0XE006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_A )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__magic_name__ = tokenizer.tokenize(_A )
__magic_name__ = tokenizer.tokenize(_A )
self.assertEqual(len(_A ) , 1 )
self.assertEqual(len(_A ) , 1 )
self.assertEqual(token_a[0] , _A )
self.assertEqual(token_a[0] , _A )
@require_tokenizers
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
__magic_name__ = 0XE006
__magic_name__ = chr(_A )
__magic_name__ = AddedToken(_A , lstrip=_A )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_A )
tokenizer.from_pretrained(_A )
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ = json.load(_A )
with open(os.path.join(_A , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__magic_name__ = json.load(_A )
# a special token for Canine can be defined as follows:
__magic_name__ = 0XE006
__magic_name__ = chr(_A )
__magic_name__ = [new_token_a]
__magic_name__ = [new_token_a]
with open(os.path.join(_A , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ = tokenizer_class.from_pretrained(_A , extra_ids=0 )
self.assertIn(_A , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__magic_name__ = 0XE007
__magic_name__ = chr(_A )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ = [AddedToken(_A , lstrip=_A )]
__magic_name__ = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , extra_ids=0 )
self.assertIn(_A , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__magic_name__ = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ = 'hello world'
if self.space_between_special_tokens:
__magic_name__ = '[CLS] hello world [SEP]'
else:
__magic_name__ = input
__magic_name__ = tokenizer.encode(_A , add_special_tokens=_A )
__magic_name__ = tokenizer.decode(_A , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_A , [output, output.lower()] )
def _lowercase ( self : str ) -> str:
"""simple docstring"""
__magic_name__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__magic_name__ = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__magic_name__ = 'a'
__magic_name__ = ord(_A )
for attr in attributes_list:
setattr(_A , attr + """_id""" , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + """_id""" ) , _A )
setattr(_A , attr + """_id""" , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + """_id""" ) , _A )
setattr(_A , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_A , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_A , """additional_special_tokens_ids""" ) , [] )
__magic_name__ = 0XE006
__magic_name__ = chr(_A )
setattr(_A , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(_A , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(_A , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def _lowercase ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def _lowercase ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
def _lowercase ( self : List[str] ) -> int:
"""simple docstring"""
pass
def _lowercase ( self : int ) -> List[Any]:
"""simple docstring"""
pass
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : int ) -> int:
"""simple docstring"""
pass
def _lowercase ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
def _lowercase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
| 88 |
from collections import namedtuple

import requests
from lxml import html  # type: ignore


covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
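# Caveat (not in the original script): the XPath is tied to worldometers' current
# page layout; if the markup changes, `covid_stats()` will fail when unpacking
# fewer than three matched text nodes into the namedtuple.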
| 327 | 0 |
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def _lowerCamelCase(self):  # stub kept from the source; its original name was anonymized
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 249 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
_SCREAMING_SNAKE_CASE = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
_SCREAMING_SNAKE_CASE = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE_ ( snake_case_ ):
__magic_name__: List[Any] = VOCAB_FILES_NAMES
__magic_name__: List[str] = PRETRAINED_VOCAB_FILES_MAP
__magic_name__: List[str] = PRETRAINED_INIT_CONFIGURATION
__magic_name__: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__magic_name__: Union[str, Any] = LxmertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type IDs: 0 for the first sequence (and special tokens), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 327 | 0 |
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ = False, False, False
@dataclass
class Audio:
    """Audio feature: extracts audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__(self ) -> Optional[Any]:
"""simple docstring"""
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        """Encode example into a format for Arrow."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # To convert "PCM-byte" to "WAV-byte", the sampling rate must be known
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, use them directly
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(
        self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
    ) -> dict:
        """Decode example audio file into audio data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError("""Cannot flatten a decoded Audio feature.""" )
return {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        """Cast an Arrow array to the Audio arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed audio files into the Arrow array as bytes."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
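# Minimal usage sketch (hypothetical values; assumes the `datasets` wiring above):
#   feature = Audio(sampling_rate=16_000)
#   encoded = feature.encode_example(
#       {"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}
#   )  # -> {"bytes": b"RIFF...", "path": None}
#   decoded = feature.decode_example(encoded)
#   decoded["sampling_rate"]  # -> 16000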
| 25 |
def is_automorphic_number(number: int) -> bool:
    """An automorphic number's square ends in the number itself, e.g. 5 -> 25."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
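    # A quick sanity check in addition to the doctests: 5² = 25 and 6² = 36 end
    # in 5 and 6 respectively, so both are automorphic, while 7² = 49 is not.
    assert is_automorphic_number(5) and is_automorphic_number(6)
    assert not is_automorphic_number(7)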
| 327 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 236 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 327 | 0 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum over nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
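    # Hedged usage sketch: find_max recurses on halves of nums[left:right + 1].
    print(find_max([2, 8, 1, 7], 0, 3))  # -> 8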
| 180 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
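# Hedged usage sketch (assumes the usual transformers pipeline factory, which
# is not part of this file; model name is illustrative only):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Hello world")  # nested list: [batch, tokens, hidden]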
| 327 | 0 |
"""simple docstring"""
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """Decompresses the given bit string using the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            # lexicon keys grow one bit; prefix every existing key with "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string (only 0's and 1's) as bytes into the file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """Removes the size prefix that a compressed file should carry."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, decompresses it and writes the result to the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
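# Hedged usage sketch (assuming the functions above):
#   python lempel_ziv_decompress.py compressed.bin restored.bin
# reads the compressed file, strips its size prefix, LZW-decodes the bitstream
# and writes the decompressed bytes to the destination path.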
| 84 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Returns True if the pandigital tuple passes all substring-divisibility tests."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Sums all 0-to-(n-1) pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
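    # Sanity check for the predicate above: 1406357289 is the classic
    # substring-divisible pandigital from the Project Euler 43 statement.
    assert is_substring_divisible(tuple(int(digit) for digit in "1406357289"))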
| 327 | 0 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
super().setUp()
a__ : List[Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
a__ : List[Any] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> int:
"""simple docstring"""
a__ : Tuple = 'UNwant\u00E9d,running'
a__ : Optional[int] = 'unwanted, running'
return input_text, output_text
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : Dict = self.tokenizer_class(self.vocab_file )
a__ : Any = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(_A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 1_2, 1_0, 1_1] )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a__ : List[str] = self.get_tokenizer()
a__ : Any = self.get_rust_tokenizer()
a__ : str = 'UNwant\u00E9d,running'
a__ : str = tokenizer.tokenize(_A )
a__ : Any = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
a__ : Optional[Any] = tokenizer.encode(_A , add_special_tokens=_A )
a__ : List[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
a__ : Any = self.get_rust_tokenizer()
a__ : Union[str, Any] = tokenizer.encode(_A )
a__ : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
a__ : str = self.get_tokenizer(do_lower_case=_A )
a__ : Dict = self.get_rust_tokenizer(do_lower_case=_A )
a__ : List[str] = 'UNwant\u00E9d,running'
a__ : Tuple = tokenizer.tokenize(_A )
a__ : Dict = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
a__ : Dict = tokenizer.encode(_A , add_special_tokens=_A )
a__ : Tuple = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
a__ : Tuple = self.get_rust_tokenizer()
a__ : Dict = tokenizer.encode(_A )
a__ : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : List[str] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
a__ : Tuple = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : Tuple = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : str = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : str = BasicTokenizer(do_lower_case=_A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
a__ : Any = {}
for i, token in enumerate(_A ):
a__ : str = i
a__ : Any = WordpieceTokenizer(vocab=_A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Tuple = self.get_tokenizer()
a__ : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : Tuple = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
a__ : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=_A )
a__ : Any = tokenizer.encode("""multi-sequence build""" , add_special_tokens=_A )
a__ : Tuple = tokenizer.build_inputs_with_special_tokens(_A )
a__ : Optional[int] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
a__ : Optional[Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
a__ : List[str] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
a__ : Optional[Any] = tokenizer_r.do_lower_case if hasattr(_A , """do_lower_case""" ) else False
a__ : Tuple = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : str = ['的', '人', '有']
a__ : Any = ''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : int = True
a__ : Optional[int] = self.tokenizer_class.from_pretrained(_A , **_A )
a__ : List[str] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
a__ : List[Any] = tokenizer_p.encode(_A , add_special_tokens=_A )
a__ : Union[str, Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
a__ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(_A )
a__ : Optional[int] = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
a__ : Union[str, Any] = False
a__ : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
a__ : Any = self.tokenizer_class.from_pretrained(_A , **_A )
a__ : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
a__ : List[Any] = tokenizer_p.encode(_A , add_special_tokens=_A )
a__ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(_A )
a__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
a__ : Optional[int] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
| 170 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given any two of voltage, current and power (the third passed as 0), solve for the missing one."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
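    # Hedged usage sketch: with voltage == 0 the function solves V = P / I.
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)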
| 327 | 0 |
def solution(num: int = 1_000_000) -> int:
    """Returns the start number below `num` that produces the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, num):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
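    # For reference: Project Euler 14's published answer means solution(1_000_000)
    # returns 837799, the start of the longest Collatz chain below one million.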
| 14 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all numbers before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 327 | 0 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Find the mean (average) of a list of numbers."""
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
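    # Hedged usage sketch for the helper above:
    print(mean([3, 6, 9, 12, 15]))  # -> 9.0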
| 330 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any:
"""simple docstring"""
snake_case_ : Optional[int] = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : List[str] = patch_size
snake_case_ : List[str] = num_channels
snake_case_ : Optional[Any] = is_training
snake_case_ : Any = use_labels
snake_case_ : Tuple = hidden_size
snake_case_ : Union[str, Any] = num_hidden_layers
snake_case_ : List[Any] = num_attention_heads
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Tuple = type_sequence_label_size
snake_case_ : List[str] = initializer_range
snake_case_ : Optional[Any] = mask_ratio
snake_case_ : Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case_ : Optional[int] = (image_size // patch_size) ** 2
snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Union[str, Any] = None
if self.use_labels:
snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : str ) -> Dict:
"""simple docstring"""
snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A )
snake_case_ : str = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int:
"""simple docstring"""
snake_case_ : Any = TFViTMAEForPreTraining(_A )
snake_case_ : Optional[Any] = model(_A , training=_A )
# expected sequence length = num_patches
snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2
snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ : str = 1
snake_case_ : Dict = TFViTMAEForPreTraining(_A )
snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : List[str] = model(_A , training=_A )
snake_case_ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[Any] = TFViTMAEModelTester(self )
snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_A )
snake_case_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Dict = [*signature.parameters.keys()]
snake_case_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
snake_case_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
snake_case_ : List[str] = model(_A , noise=_A )
snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
snake_case_ : str = model(**_A , noise=_A )
snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_A : int ):
snake_case_ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
snake_case_ : str = v.numpy()
else:
snake_case_ : Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_A )
snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
snake_case_ : Any = prepare_numpy_arrays(_A )
snake_case_ : List[Any] = model(_A , noise=_A )
snake_case_ : List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.constant(_A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ : Optional[Any] = tf_noise
super().check_pt_tf_models(_A , _A , _A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_A )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(_A , _A ),)
if isinstance(_A , _A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_A , '_keras_serializable' , _A )
}
snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.convert_to_tensor(_A )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ : Optional[Any] = main_layer_class(_A )
snake_case_ : List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
snake_case_ : int = model(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' )
model.save(_A )
snake_case_ : str = tf.keras.models.load_model(
_A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_A , tf.keras.Model )
snake_case_ : List[str] = model(_A )
self.assert_outputs_same(_A , _A )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A )
snake_case_ : int = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Any = outputs.last_hidden_state.numpy()
snake_case_ : Optional[int] = 0
else:
snake_case_ : str = outputs.logits.numpy()
snake_case_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
snake_case_ : Any = model_class.from_pretrained(_A )
snake_case_ : Any = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Dict = after_outputs['last_hidden_state'].numpy()
snake_case_ : Dict = 0
else:
snake_case_ : Any = after_outputs['logits'].numpy()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_A )
snake_case_ : int = self._prepare_for_class(_A , _A )
snake_case_ : str = model(_A , noise=_A )
snake_case_ : Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_A )
snake_case_ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ : str = model_class.from_config(model.config )
snake_case_ : Union[str, Any] = new_model(_A ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ : List[str] = new_model(_A , noise=_A )
self.assert_outputs_same(_A , _A )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_A )
def prepare_img():
snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
snake_case_ : List[Any] = self.default_image_processor
snake_case_ : Dict = prepare_img()
snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ : int = ViTMAEConfig()
snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ : Optional[Any] = model(**_A , noise=_A )
# verify the logits
snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _A )
snake_case_ : Any = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
| 327 | 0 |
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 225 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune this branch if the running sum already overshoots the target, or if the
    # target can no longer be reached even by taking every remaining number.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
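# Sanity check, derived by hand from the inputs above (assuming the
# reconstruction is faithful): the only subsets of [3, 34, 4, 12, 5, 2]
# summing to 9 are [3, 4, 2] and [4, 5], so the script should print:
#   [3, 4, 2] [4, 5]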
| 327 | 0 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
| 266 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
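# Worked example (illustrative values, not from the source): for water at
# room temperature, bulk_modulus ≈ 2.15e9 Pa and density ≈ 998 kg/m^3, so
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ≈ 1467.7 m/s.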
| 327 | 0 |
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
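# Worked example (values chosen for illustration): f = 1 / (2*pi*sqrt(L*C)),
# so resonant_frequency(inductance=10, capacitance=5) returns
# ("Resonant frequency", 0.0225...), since 1 / (2*pi*sqrt(50)) ≈ 0.02251.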
| 88 |
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
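# arc_length(90, 10) is a quarter of a circle of radius 10, i.e.
# 2*pi*10*(90/360) = 5*pi ≈ 15.707963..., which is what the call above prints.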
| 327 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'google/vivit-b-16x2-kinetics400': (
'https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class UpperCAmelCase_ ( snake_case_ ):
UpperCamelCase ="vivit"
def __init__( self , UpperCamelCase_=2_24 , UpperCamelCase_=32 , UpperCamelCase_=[2, 16, 16] , UpperCamelCase_=3 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu_fast" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-06 , UpperCamelCase_=True , **UpperCamelCase_ , ) -> List[Any]:
__lowercase : Optional[int] = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : List[Any] = intermediate_size
__lowercase : List[Any] = hidden_act
__lowercase : int = hidden_dropout_prob
__lowercase : Union[str, Any] = attention_probs_dropout_prob
__lowercase : Union[str, Any] = initializer_range
__lowercase : Optional[int] = layer_norm_eps
__lowercase : Optional[int] = image_size
__lowercase : List[str] = num_frames
__lowercase : Optional[Any] = tubelet_size
__lowercase : Optional[Any] = num_channels
__lowercase : List[Any] = qkv_bias
super().__init__(**_A )
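# Minimal usage sketch (the class and attribute names above are reconstructed
# from context, so treat this as illustrative rather than authoritative):
#
#   config = VivitConfig()                      # vivit-b-16x2 style defaults
#   config.num_frames                           # -> 32
#   small = VivitConfig(num_hidden_layers=2)    # any field can be overridden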
| 249 |
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


# The model-specific class name was anonymized in the source, so the
# placeholder identifier is kept; the behaviour below is unchanged.
class SCREAMING_SNAKE_CASE_(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
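# Minimal usage sketch (illustrative only — the class above came from an
# anonymized source, so the model it belongs to is unknown, and `pil_image`
# is a placeholder for any PIL image you supply):
#
#   processor = SCREAMING_SNAKE_CASE_()           # resize to 256, center-crop to 224
#   batch = processor(images=pil_image, return_tensors="np")
#   batch["pixel_values"].shape                   # -> (1, 3, 224, 224)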
| 327 | 0 |
"""simple docstring"""
import re
import string
import numpy as np
import datasets
UpperCAmelCase__ : str = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
UpperCAmelCase__ : int = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results[\"exact_match\"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]\n >>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results[\"exact_match\"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric(\"exact_match\")\n >>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It\'s like comparing oranges and apples.\"]\n >>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It\'s like comparing apples and oranges.\"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results[\"exact_match\"], 1))\n 33.3\n\n'
UpperCAmelCase__ : List[str] = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 25 |
import sys

N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
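# Cross-check (not re-verified against this exact transcription of the digits):
# for the full 1000-digit Project Euler #8 input, the commonly cited greatest
# product of thirteen adjacent digits is 23514624000, so solution() should
# print that value.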
| 327 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
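# Usage sketch (a minimal illustration of the template; "content" is an
# arbitrary example column name):
#
#   task = LanguageModeling(text_column="content")
#   task.column_mapping  # -> {"content": "text"}, i.e. rename to the schema column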
| 236 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split (train, val or test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 327 | 0 |
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # An automorphic number's square ends in the number itself, so compare the
    # trailing digits of the number and its square one digit at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
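# Examples (automorphic numbers are those whose square ends in the number
# itself): is_automorphic_number(5) -> True (25), is_automorphic_number(76)
# -> True (5776), is_automorphic_number(7) -> False (49).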
| 180 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 327 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ : int , lowercase__ : int = False ) -> List[Any]:
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 1_0 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
lowerCAmelCase_ :str = [
2_0_4_7,
1_3_7_3_6_5_3,
2_5_3_2_6_0_0_1,
3_2_1_5_0_3_1_7_5_1,
2_1_5_2_3_0_2_8_9_8_7_4_7,
3_4_7_4_7_4_9_6_6_0_3_8_3,
3_4_1_5_5_0_0_7_1_7_2_8_3_2_1,
1,
3_8_2_5_1_2_3_0_5_6_5_4_6_4_1_3_0_5_1,
1,
1,
3_1_8_6_6_5_8_5_7_8_3_4_0_3_1_1_5_1_1_6_7_4_6_1,
3_3_1_7_0_4_4_0_6_4_6_7_9_8_8_7_3_8_5_9_6_1_9_8_1,
]
lowerCAmelCase_ :str = [2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1]
for idx, _p in enumerate(__a , 1 ):
if n < _p:
# then we have our last prime to check
lowerCAmelCase_ :Any = primes[:idx]
break
lowerCAmelCase_ :List[str] = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowerCAmelCase_ :int = False
for r in range(__a ):
lowerCAmelCase_ :int = pow(__a , d * 2**r , __a )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowerCAmelCase_ :Optional[int] = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def _snake_case ( ) -> str:
'''simple docstring'''
assert not miller_rabin(5_6_1 )
assert miller_rabin(5_6_3 )
# 2047
assert not miller_rabin(8_3_8_2_0_1 )
assert miller_rabin(8_3_8_2_0_7 )
# 1_373_653
assert not miller_rabin(1_7_3_1_6_0_0_1 )
assert miller_rabin(1_7_3_1_6_0_1_7 )
# 25_326_001
assert not miller_rabin(3_0_7_8_3_8_6_6_4_1 )
assert miller_rabin(3_0_7_8_3_8_6_6_5_3 )
# 3_215_031_751
assert not miller_rabin(1_7_1_3_0_4_5_5_7_4_8_0_1 )
assert miller_rabin(1_7_1_3_0_4_5_5_7_4_8_1_9 )
# 2_152_302_898_747
assert not miller_rabin(2_7_7_9_7_9_9_7_2_8_3_0_7 )
assert miller_rabin(2_7_7_9_7_9_9_7_2_8_3_2_7 )
# 3_474_749_660_383
assert not miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_4_4_1 )
assert miller_rabin(1_1_3_8_5_0_0_2_3_9_0_9_5_2_7 )
# 341_550_071_728_321
assert not miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_5_1 )
assert miller_rabin(1_2_7_5_0_4_1_0_1_8_8_4_8_8_0_4_3_9_1 )
# 3_825_123_056_546_413_051
assert not miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_8_6_7 )
assert miller_rabin(7_9_6_6_6_4_6_4_4_5_8_5_0_7_7_8_7_7_9_1_9_5_1 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_3_3 )
assert miller_rabin(5_5_2_8_4_0_6_7_7_4_4_6_6_4_7_8_9_7_6_6_0_3_5_9 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 84 |
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()

        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()

        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving a transformed model without reversing it first should fail
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 327 | 0 |
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
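# Usage sketch (the model id is an example of a public SDE-VE checkpoint and
# may need adjusting for your setup):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]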
| 170 |
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.

    Input:
    ------
        params: `NameSpace` parameters
        data: `List[np.array[int]]`
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
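# Usage sketch (illustrative; `params` stands for whatever argparse namespace
# the distillation scripts build, with fields like mlm, special_tok_ids,
# max_model_input_size and is_master):
#
#   dataset = LmSeqsDataset(params=params, data=token_id_arrays)
#   loader = torch.utils.data.DataLoader(
#       dataset, batch_size=32, collate_fn=dataset.batch_sequences
#   )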
| 327 | 0 |
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]


def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new


def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
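# Example invocation (all paths below are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --is_encoder_only   # only when exporting an encoder-only model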
| 14 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")

    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 327 | 0 |
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
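# A minimal, hedged usage sketch for the pipeline defined above. The checkpoint id
# "google/ddpm-cifar10-32" and the UNet2DModel/DDIMScheduler pairing are assumptions
# for illustration, not something this file specifies.
if __name__ == "__main__":
    from diffusers import DDIMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint
    scheduler = DDIMScheduler()
    pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
    images = pipe(batch_size=1, num_inference_steps=50).images
    images[0].save("ddim_sample.png")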
| 330 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
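# A hedged end-to-end sketch of how the four helpers above pair up inside an
# `accelerate` training script; `accelerator`, `model`, and `optimizer` are assumed
# to already be prepared with an FSDP plugin configured:
#
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#   save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)
#   ...
#   load_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#   load_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)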
| 327 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
 | 225 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 327 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 266 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 327 | 0 |
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 88 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 327 | 0 |
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 249 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_SCREAMING_SNAKE_CASE = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
_SCREAMING_SNAKE_CASE = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
_SCREAMING_SNAKE_CASE = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
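# A brief, hedged usage sketch for the fast tokenizer above, using the checkpoint
# already listed in PRETRAINED_VOCAB_FILES_MAP:
#
#   tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#   encoded = tokenizer("Who is eating the apple?", return_tensors="pt")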
| 327 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
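# A brief, hedged usage sketch for the SentencePiece tokenizer above, using one of
# the checkpoints already listed in PRETRAINED_VOCAB_FILES_MAP:
#
#   tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tokenizer("Hello world!")["input_ids"]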
| 25 |
def is_automorphic_number(number: int) -> bool:
    """Return True if `number` squared ends in `number` itself (an automorphic number)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
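    # Quick sanity checks in addition to the doctests, verified by hand:
    # 5**2 = 25 ends in 5 and 76**2 = 5776 ends in 76, while 7**2 = 49 does not end in 7.
    assert is_automorphic_number(5)
    assert is_automorphic_number(76)
    assert not is_automorphic_number(7)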
| 327 | 0 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class __lowerCAmelCase ( snake_case_):
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :int = 'facebook/opt-350m'
super().setUp()
def SCREAMING_SNAKE_CASE ( self: Dict ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=True )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32 )
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module ) ):
                module.q_proj = LoRALayer(module.q_proj , rank=16 )
                module.k_proj = LoRALayer(module.k_proj , rank=16 )
                module.v_proj = LoRALayer(module.v_proj , rank=16 )
        # Step 3: dummy batch
        batch = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch )
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module , LoRALayer ):
                self.assertTrue(module.adapter[1].weight.grad is not None )
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
            elif isinstance(module , nn.Embedding ):
                self.assertTrue(module.weight.grad is None )
class __lowerCAmelCase ( snake_case_):
_a = "gpt2-xl"
_a = 3.3191854854152187
| 236 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
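
# Note: `_LazyModule` replaces this module in `sys.modules`, deferring the
# heavy torch-dependent imports above until one of the listed symbols is
# actually accessed, so importing the package stays cheap when torch is absent.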
| 327 | 0 |
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
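
    # Minimal usage sketch (illustrative data): slowsort works in place and
    # returns None, so the caller inspects the mutated list afterwards.
    example = [5, 2, 9, 1]
    slowsort(example)
    assert example == [1, 2, 5, 9]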
| 180 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['truncation'] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['return_tensors'] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess( self , inputs , **tokenize_kwargs ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs

    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self , model_outputs , return_tensors=False ):
        """simple docstring"""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return super().__call__(*args , **kwargs )
| 327 | 0 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self , key: int = 0 ) -> None:
        self.__key = key

    def encrypt(self , content: str , key: int ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def decrypt(self , content: list[str] , key: int ) -> list[str]:
        assert isinstance(key , int ) and isinstance(content , list )
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch ) ^ key ) for ch in content]

    def encrypt_string(self , content: str , key: int = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def decrypt_string(self , content: str , key: int = 0 ) -> str:
        assert isinstance(key , int ) and isinstance(content , str )
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch ) ^ key )
        return ans

    def encrypt_file(self , file: str , key: int = 0 ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("encrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key ) )
        except OSError:
            return False
        return True

    def decrypt_file(self , file: str , key: int ) -> bool:
        assert isinstance(file , str ) and isinstance(key , int )
        try:
            with open(file ) as fin, open("decrypt.out" , "w+" ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 84 |
from itertools import permutations
def is_substring_divisible(num ):
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n = 10 ):
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
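
# Sanity check from the problem statement: 1406357289 is a 0-to-9 pandigital
# number with the substring-divisibility property, so
#     is_substring_divisible(tuple(int(d) for d in "1406357289"))
# should return True.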
| 327 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float , current: float , power: float ):
    result = namedtuple('result' , 'name value' )
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 | 0 |
def euclidean_gcd(a: int , b: int ) -> int:
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int , b: int ) -> int:
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )


def main() -> None:
    """simple docstring"""
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
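
# Worked trace of the iterative version, for reference:
# euclidean_gcd(5, 3): (5, 3) -> (3, 2) -> (2, 1) -> (1, 0) => 1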
| 14 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ):
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '' , x ) for x in predictions] )
                references = np.array([re.sub(s , '' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('' , '' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('' , '' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 327 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
a_ = datasets.logging.get_logger(__name__)
a_ = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
a_ = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
a_ = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://unbabel.github.io/COMET/html/index.html''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''sources''': datasets.Value('''string''' , id='''sequence''' ),
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/Unbabel/COMET'''] , reference_urls=[
'''https://github.com/Unbabel/COMET''',
'''https://www.aclweb.org/anthology/2020.emnlp-main.213/''',
'''http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6''',
] , )
    def _download_and_prepare( self , dl_manager ):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('''wmt20-comet-da''' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )

    def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores, mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 330 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        image_size=30 ,
        patch_size=2 ,
        num_channels=3 ,
        is_training=True ,
        use_labels=True ,
        hidden_size=32 ,
        num_hidden_layers=2 ,
        num_attention_heads=4 ,
        intermediate_size=37 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        type_sequence_label_size=10 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        mask_ratio=0.6 ,
        scope=None ,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1) ) )
    def prepare_config_and_inputs(self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining(self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
def UpperCAmelCase_ ( self : Any ) -> List[Any]:
"""simple docstring"""
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
snake_case_ : List[str] = model(_A , noise=_A )
snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
snake_case_ : str = model(**_A , noise=_A )
snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_A : int ):
snake_case_ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
snake_case_ : str = v.numpy()
else:
snake_case_ : Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_A )
snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
snake_case_ : Any = prepare_numpy_arrays(_A )
snake_case_ : List[Any] = model(_A , noise=_A )
snake_case_ : List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.constant(_A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ : Optional[Any] = tf_noise
super().check_pt_tf_models(_A , _A , _A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_A )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(_A , _A ),)
if isinstance(_A , _A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_A , '_keras_serializable' , _A )
}
snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.convert_to_tensor(_A )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ : Optional[Any] = main_layer_class(_A )
snake_case_ : List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
snake_case_ : int = model(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' )
model.save(_A )
snake_case_ : str = tf.keras.models.load_model(
_A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_A , tf.keras.Model )
snake_case_ : List[str] = model(_A )
self.assert_outputs_same(_A , _A )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A )
snake_case_ : int = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Any = outputs.last_hidden_state.numpy()
snake_case_ : Optional[int] = 0
else:
snake_case_ : str = outputs.logits.numpy()
snake_case_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
snake_case_ : Any = model_class.from_pretrained(_A )
snake_case_ : Any = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Dict = after_outputs['last_hidden_state'].numpy()
snake_case_ : Dict = 0
else:
snake_case_ : Any = after_outputs['logits'].numpy()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_A )
snake_case_ : int = self._prepare_for_class(_A , _A )
snake_case_ : str = model(_A , noise=_A )
snake_case_ : Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_A )
snake_case_ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ : str = model_class.from_config(model.config )
snake_case_ : Union[str, Any] = new_model(_A ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ : List[str] = new_model(_A , noise=_A )
self.assert_outputs_same(_A , _A )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_A )
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' )
snake_case_ : List[Any] = self.default_image_processor
snake_case_ : Dict = prepare_img()
snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case_ : int = ViTMAEConfig()
snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
snake_case_ : Optional[Any] = model(**_A , noise=_A )
# verify the logits
snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _A )
snake_case_ : Any = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
| 327 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCamelCase__ : Any = get_logger(__name__)
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int]=0 ) -> Any:
os.makedirs(__a , exist_ok=__a )
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
SCREAMING_SNAKE_CASE_ = os.path.join(__a , __a )
if accelerator.process_index == 0:
logger.info(f"Saving model to {output_model_file}" )
torch.save(__a , __a )
logger.info(f"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a , __a )
logger.info(f"Saving model to {output_model_file}" )
torch.save(__a , __a )
logger.info(f"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = os.path.join(__a , f"{MODEL_NAME}_{model_index}" )
os.makedirs(__a , exist_ok=__a )
logger.info(f"Saving model to {ckpt_dir}" )
SCREAMING_SNAKE_CASE_ = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=__a , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , )
logger.info(f"Model saved to {ckpt_dir}" )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : str=0 ) -> int:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(__a ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
SCREAMING_SNAKE_CASE_ = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
SCREAMING_SNAKE_CASE_ = os.path.join(__a , __a )
logger.info(f"Loading model from {input_model_file}" )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
logger.info(f"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a , __a )
logger.info(f"Loading model from {input_model_file}" )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
logger.info(f"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
SCREAMING_SNAKE_CASE_ = (
os.path.join(__a , f"{MODEL_NAME}_{model_index}" )
if f"{MODEL_NAME}" not in input_dir
else input_dir
)
logger.info(f"Loading model from {ckpt_dir}" )
SCREAMING_SNAKE_CASE_ = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=__a , storage_reader=dist_cp.FileSystemReader(__a ) , planner=DefaultLoadPlanner() , )
SCREAMING_SNAKE_CASE_ = state_dict['model']
logger.info(f"Model loaded from {ckpt_dir}" )
model.load_state_dict(__a )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int=0 ) -> List[str]:
os.makedirs(__a , exist_ok=__a )
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
SCREAMING_SNAKE_CASE_ = FSDP.optim_state_dict(__a , __a )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
SCREAMING_SNAKE_CASE_ = (
f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a , __a )
logger.info(f"Saving Optimizer state to {output_optimizer_file}" )
torch.save(__a , __a )
logger.info(f"Optimizer state saved in {output_optimizer_file}" )
else:
SCREAMING_SNAKE_CASE_ = os.path.join(__a , f"{OPTIMIZER_NAME}_{optimizer_index}" )
os.makedirs(__a , exist_ok=__a )
logger.info(f"Saving Optimizer state to {ckpt_dir}" )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , )
logger.info(f"Optimizer state saved in {ckpt_dir}" )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any]=0 ) -> Any:
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
__a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
SCREAMING_SNAKE_CASE_ = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
SCREAMING_SNAKE_CASE_ = (
f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
SCREAMING_SNAKE_CASE_ = os.path.join(__a , __a )
logger.info(f"Loading Optimizer state from {input_optimizer_file}" )
SCREAMING_SNAKE_CASE_ = torch.load(__a )
logger.info(f"Optimizer state loaded from {input_optimizer_file}" )
else:
SCREAMING_SNAKE_CASE_ = (
os.path.join(__a , f"{OPTIMIZER_NAME}_{optimizer_index}" )
if f"{OPTIMIZER_NAME}" not in input_dir
else input_dir
)
logger.info(f"Loading Optimizer from {ckpt_dir}" )
SCREAMING_SNAKE_CASE_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__a ) , )
SCREAMING_SNAKE_CASE_ = optim_state['optimizer']
logger.info(f"Optimizer loaded from {ckpt_dir}" )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
| 225 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int] , max_sum: int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result


def create_state_space_tree(
    nums: list[int] , max_sum: int , num_index: int , path: list[int] , result: list[list[int]] , remaining_nums_sum: int , ) -> None:
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
_SCREAMING_SNAKE_CASE = [3, 34, 4, 12, 5, 2]
_SCREAMING_SNAKE_CASE = 9
_SCREAMING_SNAKE_CASE = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
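
# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 the expected output is the
# two subsets [3, 4, 2] and [4, 5], discovered in depth-first order.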
| 327 | 0 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num ):
    """simple docstring"""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n = 10 ):
    """simple docstring"""
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 266 |
def speed_of_sound_in_a_fluid(density: float , bulk_modulus: float ) -> float:
    if density <= 0:
        raise ValueError('Impossible fluid density' )
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus' )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
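
# Rough illustrative numbers (approximate, for orientation only): water has a
# density of ~998 kg/m^3 and a bulk modulus of ~2.15e9 Pa, giving
# sqrt(2.15e9 / 998) ~ 1.47e3 m/s, close to the commonly quoted ~1480 m/s.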
| 327 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 |
from math import pi
def arc_length(angle: float , radius: float ) -> float:
    return 2 * pi * radius * (angle / 360 )
if __name__ == "__main__":
print(arc_length(90, 10))
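
# Worked example for the call above (angle in degrees):
# arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ~ 15.71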
| 327 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 249 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__(
        self ,
        do_resize: bool = True ,
        size: Dict[str, int] = None ,
        resample: PILImageResampling = PIL.Image.BICUBIC ,
        do_center_crop: bool = True ,
        crop_size: Dict[str, int] = None ,
        rescale_factor: Union[int, float] = 1 / 255 ,
        do_rescale: bool = True ,
        do_normalize: bool = True ,
        image_mean: Optional[Union[float, List[float]]] = None ,
        image_std: Optional[Union[float, List[float]]] = None ,
        **kwargs ,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray:
"""simple docstring"""
snake_case_ : Tuple = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
_A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
snake_case_ : Optional[int] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A )
def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def UpperCAmelCase_ ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : Union[str, Any]=None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image:
"""simple docstring"""
snake_case_ : int = do_resize if do_resize is not None else self.do_resize
snake_case_ : str = resample if resample is not None else self.resample
snake_case_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
snake_case_ : Dict = image_std if image_std is not None else self.image_std
snake_case_ : int = size if size is not None else self.size
snake_case_ : Optional[int] = get_size_dict(_A )
snake_case_ : int = crop_size if crop_size is not None else self.crop_size
snake_case_ : Any = get_size_dict(_A , param_name='crop_size' )
snake_case_ : Optional[Any] = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
snake_case_ : Optional[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
snake_case_ : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
snake_case_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
snake_case_ : str = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images]
snake_case_ : Tuple = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
| 327 | 0 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(n: int = 2_000_000 ) -> int:
    return sum(takewhile(lambda x: x < n , prime_generator() ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 25 |
import sys
_SCREAMING_SNAKE_CASE = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution( n = N ):
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
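# An illustrative cross-check on a hypothetical tiny input (not the problem
# string): with 14 digits there are exactly two 13-digit windows, and both
# contain a zero here, so the largest product is 0.
def _tiny_check() -> None:
    assert solution("12345678901234" ) == 0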
| 327 | 0 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : str = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class EfficientFormerConfig ( PretrainedConfig ):
    model_type = "efficientformer"
    def __init__( self , depths: List[int] = [3, 2, 6, 4] , hidden_sizes: List[int] = [48, 96, 2_24, 4_48] , downsamples: List[bool] = [True, True, True, True] , dim: int = 4_48 , key_dim: int = 32 , attention_ratio: int = 4 , resolution: int = 7 , num_hidden_layers: int = 5 , num_attention_heads: int = 8 , mlp_expansion_ratio: int = 4 , hidden_dropout_prob: float = 0.0 , patch_size: int = 16 , num_channels: int = 3 , pool_size: int = 3 , downsample_patch_size: int = 3 , downsample_stride: int = 2 , downsample_pad: int = 1 , drop_path_rate: float = 0.0 , num_metaad_blocks: int = 1 , distillation: bool = True , use_layer_scale: bool = True , layer_scale_init_value: float = 1e-5 , hidden_act: str = "gelu" , initializer_range: float = 0.02 , layer_norm_eps: float = 1e-1_2 , image_size: int = 2_24 , batch_norm_eps: float = 1e-0_5 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_metaad_blocks = num_metaad_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
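# A minimal usage sketch (not part of the original file): the config behaves
# like any PretrainedConfig, so defaults can be overridden at construction.
def _example_config() -> EfficientFormerConfig:
    config = EfficientFormerConfig(num_hidden_layers=6 , hidden_act="gelu" )
    assert config.num_hidden_layers == 6
    return config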
| 236 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False , metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments :
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
    max_source_length: Optional[int] = field(
        default=1024 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    max_target_length: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    val_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        } , )
    test_max_target_length: Optional[int] = field(
        default=142 , metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    n_train: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None , metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None , metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None , metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics( split , metrics , output_dir ):
    logger.info(f"""***** {split} metrics *****""" )
    for key in sorted(metrics.keys() ):
        logger.info(f"""  {key} = {metrics[key]}""" )
    save_json(metrics , os.path.join(output_dir , f"""{split}_results.json""" ) )
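# An illustrative call with dummy values (not from the original script): logs
# each metric and writes `val_results.json` under an existing output directory.
# handle_metrics('val' , {'val_loss': 1.2345, 'val_n_objs': 100} , '/tmp/out' )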
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
    for p in extra_model_params:
        if getattr(training_args , p , None ):
            assert hasattr(config , p ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
            setattr(config , p , getattr(training_args , p ) )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForSeqaSeqLM.from_pretrained(
        model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=config , cache_dir=model_args.cache_dir , )
    # use task specific params
    use_task_specific_params(model , data_args.task )
    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer , (MBartTokenizer, MBartTokenizerFast) ):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer , MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
    if model_args.freeze_embeds:
        freeze_embeds(model )
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder() )
        assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
        if training_args.do_predict
        else None
    )
    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task , tokenizer ) if training_args.predict_with_generate else None
    )
    trainer = SeqaSeqTrainer(
        model=model , args=training_args , data_args=data_args , train_dataset=train_dataset , eval_dataset=eval_dataset , data_collator=SeqaSeqDataCollator(
            tokenizer , data_args , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=compute_metrics_fn , tokenizer=tokenizer , )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics['train_n_objs'] = data_args.n_train
        trainer.save_model()  # this also saves the tokenizer
        if trainer.is_world_process_zero():
            handle_metrics('train' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(metric_key_prefix='val' )
        metrics['val_n_objs'] = data_args.n_val
        metrics['val_loss'] = round(metrics['val_loss'] , 4 )
        if trainer.is_world_process_zero():
            handle_metrics('val' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
if training_args.do_predict:
logger.info('*** Predict ***' )
        test_output = trainer.predict(test_dataset=test_dataset , metric_key_prefix='test' )
        metrics = test_output.metrics
        metrics['test_n_objs'] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics['test_loss'] = round(metrics['test_loss'] , 4 )
            handle_metrics('test' , metrics , training_args.output_dir )
            all_metrics.update(metrics )
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions , skip_special_tokens=True , clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip , test_preds )
                write_txt_file(test_preds , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
    if trainer.is_world_process_zero():
        save_json(all_metrics , os.path.join(training_args.output_dir , 'all_results.json' ) )
    return all_metrics
def _mp_fn( index ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 327 | 0 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .'
' Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_rouge( ) -> Any:
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2""", """rougeL"""])
    assert isinstance(no_aggregation , defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["""rouge2"""])
    assert (
        pd.DataFrame(no_aggregation["""rouge2"""]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_ra["""rouge2"""]).fmeasure.mean()
    )
def test_newline_cnn_improvement( ) -> List[Any]:
    k = 'rougeLsum'
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics( ) -> List[str]:
    k = ['rouge1', 'rouge2', 'rougeL']
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k)
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep( ) -> Optional[int]:
    pred = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
]
    tgt = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
    assert calculate_rouge(pred , tgt , newline_sep=True) == calculate_rouge(pred , tgt , newline_sep=False)
def test_pegasus_newline( ) -> str:
    pred = [
'" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
]
    tgt = [
' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""] , newline_sep=False)['rougeLsum']
    new_score = calculate_rouge(pred , tgt , rouge_keys=["""rougeLsum"""])['rougeLsum']
    assert new_score > prev_score
def test_rouge_cli( ) -> Tuple:
    data_dir = Path("""examples/seq2seq/test_data/wmt_en_ro""")
    metrics = calculate_rouge_path(data_dir.joinpath("""test.source""") , data_dir.joinpath("""test.target"""))
    assert isinstance(metrics , dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("""test.source""") , data_dir.joinpath("""test.target""") , bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict , defaultdict)
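# A minimal usage sketch of the helper exercised above (signature as used in
# these tests): with aggregation enabled the result is a dict of f-measures
# keyed by rouge type.
def _example_rouge() -> None:
    scores = calculate_rouge(PRED , TGT , rouge_keys=["""rouge1""", """rouge2"""])
    assert set(scores ) == {"""rouge1""", """rouge2"""}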
| 180 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_poolformer""": [
"""POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""PoolFormerConfig""",
"""PoolFormerOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""PoolFormerFeatureExtractor"""]
_SCREAMING_SNAKE_CASE = ["""PoolFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"""POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PoolFormerForImageClassification""",
"""PoolFormerModel""",
"""PoolFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 327 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , mask_ratio=0.6 , scope=None , ) -> Any:
lowerCAmelCase_ :Optional[int] = parent
lowerCAmelCase_ :Tuple = batch_size
lowerCAmelCase_ :List[Any] = image_size
lowerCAmelCase_ :List[str] = patch_size
lowerCAmelCase_ :List[str] = num_channels
lowerCAmelCase_ :Optional[Any] = is_training
lowerCAmelCase_ :Any = use_labels
lowerCAmelCase_ :Tuple = hidden_size
lowerCAmelCase_ :Union[str, Any] = num_hidden_layers
lowerCAmelCase_ :List[Any] = num_attention_heads
lowerCAmelCase_ :Optional[Any] = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ :Any = attention_probs_dropout_prob
lowerCAmelCase_ :Tuple = type_sequence_label_size
lowerCAmelCase_ :List[str] = initializer_range
lowerCAmelCase_ :Optional[Any] = mask_ratio
lowerCAmelCase_ :Any = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCAmelCase_ :Optional[int] = (image_size // patch_size) ** 2
lowerCAmelCase_ :str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> Optional[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Dict:
        model = TFViTMAEModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining( self , config , pixel_values , labels ) -> int:
        model = TFViTMAEForPreTraining(config )
        result = model(pixel_values , training=False )
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , training=False )
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common( self ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ) -> List[Any]:
        self.model_tester = TFViTMAEModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTMAEConfig , has_text_modality=False , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
def __lowerCAmelCase ( self ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def __lowerCAmelCase ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowerCAmelCase ( self ) -> List[str]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )
def __lowerCAmelCase ( self ) -> Dict:
np.random.seed(2 )
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase_ :Optional[Any] = model_class(_A )
lowerCAmelCase_ :Union[str, Any] = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :List[str] = model(_A , noise=_A )
lowerCAmelCase_ :Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
lowerCAmelCase_ :str = model(**_A , noise=_A )
lowerCAmelCase_ :Union[str, Any] = outputs_dict[0].numpy()
lowerCAmelCase_ :Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def __lowerCAmelCase ( self ) -> List[Any]:
np.random.seed(2 )
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Tuple = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(__A ):
lowerCAmelCase_ :Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
lowerCAmelCase_ :str = v.numpy()
else:
lowerCAmelCase_ :Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
lowerCAmelCase_ :int = model_class(_A )
lowerCAmelCase_ :List[Any] = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :Any = prepare_numpy_arrays(_A )
lowerCAmelCase_ :List[Any] = model(_A , noise=_A )
lowerCAmelCase_ :List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
    def check_pt_tf_models( self , tf_model , pt_model , tf_inputs_dict ) -> List[str]:
        np.random.seed(2 )
        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
        tf_noise = tf.constant(noise )
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict['noise'] = tf_noise
        super().check_pt_tf_models(tf_model , pt_model , tf_inputs_dict )
def __lowerCAmelCase ( self ) -> Dict:
np.random.seed(2 )
lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_A )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_A , _A ),)
if isinstance(_A , _A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_A , """_keras_serializable""" , _A )
}
lowerCAmelCase_ :List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCAmelCase_ :Optional[int] = tf.convert_to_tensor(_A )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
lowerCAmelCase_ :Optional[Any] = main_layer_class(_A )
lowerCAmelCase_ :List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowerCAmelCase_ :Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
lowerCAmelCase_ :int = model(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase_ :List[Any] = os.path.join(_A , """keras_model.h5""" )
model.save(_A )
lowerCAmelCase_ :str = tf.keras.models.load_model(
_A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_A , tf.keras.Model )
lowerCAmelCase_ :List[str] = model(_A )
self.assert_outputs_same(_A , _A )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
np.random.seed(2 )
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :int = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase_ :Optional[Any] = model_class(_A )
lowerCAmelCase_ :Optional[Any] = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :int = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
lowerCAmelCase_ :Any = outputs.last_hidden_state.numpy()
lowerCAmelCase_ :Optional[int] = 0
else:
lowerCAmelCase_ :str = outputs.logits.numpy()
lowerCAmelCase_ :Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
lowerCAmelCase_ :Any = model_class.from_pretrained(_A )
lowerCAmelCase_ :Any = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
lowerCAmelCase_ :Dict = after_outputs['last_hidden_state'].numpy()
lowerCAmelCase_ :Dict = 0
else:
lowerCAmelCase_ :Any = after_outputs['logits'].numpy()
lowerCAmelCase_ :Optional[Any] = 0
lowerCAmelCase_ :Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def __lowerCAmelCase ( self ) -> str:
np.random.seed(2 )
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Optional[int] = int((config.image_size // config.patch_size) ** 2 )
lowerCAmelCase_ :Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowerCAmelCase_ :str = model_class(_A )
lowerCAmelCase_ :int = self._prepare_for_class(_A , _A )
lowerCAmelCase_ :str = model(_A , noise=_A )
lowerCAmelCase_ :Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_A )
lowerCAmelCase_ :Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowerCAmelCase_ :str = model_class.from_config(model.config )
lowerCAmelCase_ :Union[str, Any] = new_model(_A ) # Build model
new_model.set_weights(model.get_weights() )
lowerCAmelCase_ :List[str] = new_model(_A , noise=_A )
self.assert_outputs_same(_A , _A )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""" )
def __lowerCAmelCase ( self ) -> Optional[int]:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def __lowerCAmelCase ( self ) -> Tuple:
pass
@slow
def __lowerCAmelCase ( self ) -> Tuple:
        model = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
        self.assertIsNotNone(model )
def prepare_img( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ) -> Dict:
        return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining( self ) -> Dict:
        np.random.seed(2 )
        model = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""tf""" )
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
        noise = np.random.uniform(size=(1, num_patches) )
        # forward pass
        outputs = model(**inputs , noise=noise )
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768] )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.convert_to_tensor(
            [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
        tf.debugging.assert_near(outputs.logits[0, :3, :3] , expected_slice , atol=1E-4 )
| 84 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
"""simple docstring"""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id )
        inp = tokenizer('This is me' , return_tensors='pt' )
        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )
        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output , output_from_pretrained ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
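# A minimal round-trip sketch of the API exercised above (illustrative only;
# any small seq2seq checkpoint works the same way).
def _bettertransformer_roundtrip(model_id='hf-internal-testing/tiny-random-t5' ):
    model = AutoModelForSeqaSeqLM.from_pretrained(model_id )
    model = model.to_bettertransformer()       # swap in fused attention kernels
    model = model.reverse_bettertransformer()  # restore canonical modules for saving
    return model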
| 327 | 0 |
def euclidean_distance_sqr( point_a , point_b ) -> float:
    """simple docstring"""
    return (point_a[0] - point_b[0]) ** 2 + (point_a[1] - point_b[1]) ** 2
def column_based_sort( array , column=0 ) -> list:
    """simple docstring"""
    return sorted(array , key=lambda point: point[column])
def dis_between_closest_pair( points , points_counts , min_dis=float("""inf""") ) -> float:
    """simple docstring"""
    for i in range(points_counts - 1):
        for j in range(i + 1 , points_counts):
            current_dis = euclidean_distance_sqr(points[i] , points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float("""inf""") ) -> float:
    """simple docstring"""
    for i in range(min(6 , points_counts - 1) , points_counts):
        for j in range(max(0 , i - 6) , i):
            current_dis = euclidean_distance_sqr(points[i] , points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    """simple docstring"""
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts)
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid)
    closest_pair_dis = min(closest_in_left , closest_in_right)
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip) , closest_pair_dis)
    return min(closest_pair_dis , closest_in_strip)
def closest_pair_of_points( points , points_counts ) -> float:
    """simple docstring"""
    points_sorted_on_x = column_based_sort(points , column=0)
    points_sorted_on_y = column_based_sort(points , column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 170 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset ( Dataset ):
    def __init__( self , params , data ) -> Union[str, Any]:
        """simple docstring"""
        self.params = params
        self.token_ids = np.array(data )
        self.lengths = np.array([len(t ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : Tuple , _A : Optional[int] ) -> str:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self : List[str] ) -> str:
"""simple docstring"""
return len(self.lengths )
    def check( self ) -> str:
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def remove_long_sequences( self ) -> Optional[Any]:
        """simple docstring"""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(F"""Splitting {sum(indices )} too long sequences.""" )
        def divide_chunks(l , n ):
            return [l[i : i + n] for i in range(0 , len(l ) , n )]
        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id , sep_id = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            cls_id , sep_id = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s , 0 , cls_id )
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s , len(sub_s ) , sep_id )
                    assert len(sub_s ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s )
                new_tok_ids.extend(sub_seqs )
                new_lengths.extend([len(l ) for l in sub_seqs] )
        self.token_ids = np.array(new_tok_ids )
        self.lengths = np.array(new_lengths )
    def remove_empty_sequences( self ) -> List[str]:
        """simple docstring"""
        init_size = len(self )
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self )
        logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" )
    def remove_unknown_sequences( self ) -> int:
        """simple docstring"""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids['unk_token']
            init_size = len(self )
            unk_occs = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
            indices = (unk_occs / self.lengths) < 0.5
            self.token_ids = self.token_ids[indices]
            self.lengths = self.lengths[indices]
            new_size = len(self )
            logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" )
    def print_statistics( self ) -> Any:
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F"""{len(self )} sequences""" )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences( self , batch ) -> List[Any]:
        """simple docstring"""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids ) == len(lengths )
        # Max for paddings
        max_seq_len_ = max(lengths )
        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids['pad_token']
        else:
            pad_idx = self.params.special_tok_ids['unk_token']
        tk_ = [list(t.astype(int ) ) + [pad_idx] * (max_seq_len_ - len(t )) for t in token_ids]
        assert len(tk_ ) == len(token_ids )
        assert all(len(t ) == max_seq_len_ for t in tk_ )
        tk_t = torch.tensor(tk_ )  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths )  # (bs)
return tk_t, lg_t
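# A standalone sketch of the padding logic used by `batch_sequences` above,
# assuming variable-length numpy token-id arrays and a given pad id
# (helper name and sample values are illustrative).
def _pad_batch(sequences , pad_idx ):
    max_len = max(len(s ) for s in sequences )
    return torch.tensor([list(s ) + [pad_idx] * (max_len - len(s )) for s in sequences] )
# _pad_batch([np.array([5, 6, 7]), np.array([8, 9])], pad_idx=0)
# -> tensor([[5, 6, 7], [8, 9, 0]])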
| 327 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def map( dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.map(**kwargs )
@get_duration
def filter( dataset , **kwargs ):
    """simple docstring"""
    _ = dataset.filter(**kwargs )
def benchmark_map_filter( ):
    """simple docstring"""
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , '''dataset.arrow''' ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=True )
        def tokenize(examples ):
            return tokenizer(examples['''text'''] )
        times['map identity'] = map(dataset )
        times['map identity batched'] = map(dataset , batched=True )
        times['map no-op batched'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''numpy''' ):
            times['map no-op batched numpy'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''pandas''' ):
            times['map no-op batched pandas'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
            times['map no-op batched pytorch'] = map(dataset , function=lambda x : None , batched=True )
        with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
            times['map no-op batched tensorflow'] = map(dataset , function=lambda x : None , batched=True )
        times['map fast-tokenizer batched'] = map(dataset , function=tokenize , batched=True )
        times['filter'] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , '''wb''' ) as f:
        f.write(json.dumps(times ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 14 |
def euclidean_gcd( a , b ):
    while b:
        a , b = b, a % b
    return a
def euclidean_gcd_recursive( a , b ):
    return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main( ):
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
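# An illustrative property check against the standard library: both variants
# should agree with math.gcd on non-negative inputs.
import math
def _check_gcd( ):
    for a, b in [(3, 5), (54, 24), (0, 7), (48, 18)]:
        assert euclidean_gcd(a , b ) == euclidean_gcd_recursive(a , b ) == math.gcd(a , b )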
| 327 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree( tree ):
    # Validation
    def is_valid_tree(node ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(tree ):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.''' )
    def is_binary_search_tree_recursive_check(
        node , left_bound , right_bound ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(tree , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
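# A small usage sketch (hypothetical trees): 2.0 at the root with 1.0/3.0 as
# children is a valid BST; swapping the children is not.
def _example_bst_check( ):
    good = TreeNode(2.0 , TreeNode(1.0 ) , TreeNode(3.0 ) )
    bad = TreeNode(2.0 , TreeNode(3.0 ) , TreeNode(1.0 ) )
    assert is_binary_search_tree(good )
    assert not is_binary_search_tree(bad )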
| 330 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model( fsdp_plugin , accelerator , model , output_dir , model_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
            output_model_file = os.path.join(output_dir , weights_name )
            if accelerator.process_index == 0:
                logger.info(f"""Saving model to {output_model_file}""" )
                torch.save(state_dict , output_model_file )
                logger.info(f"""Model saved to {output_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
                if model_index == 0
                else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
            )
            output_model_file = os.path.join(output_dir , weights_name )
            logger.info(f"""Saving model to {output_model_file}""" )
            torch.save(state_dict , output_model_file )
            logger.info(f"""Model saved to {output_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir , f"""{MODEL_NAME}_{model_index}""" )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f"""Saving model to {ckpt_dir}""" )
            state_dict = {'model': state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f"""Model saved to {ckpt_dir}""" )
def load_fsdp_model( fsdp_plugin , accelerator , model , input_dir , model_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model ) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object' )
                return
            weights_name = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin"""
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f"""Loading model from {input_model_file}""" )
            state_dict = torch.load(input_model_file )
            logger.info(f"""Model loaded from {input_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
                if model_index == 0
                else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
            )
            input_model_file = os.path.join(input_dir , weights_name )
            logger.info(f"""Loading model from {input_model_file}""" )
            state_dict = torch.load(input_model_file )
            logger.info(f"""Model loaded from {input_model_file}""" )
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir , f"""{MODEL_NAME}_{model_index}""" )
                if f"""{MODEL_NAME}""" not in input_dir
                else input_dir
            )
            logger.info(f"""Loading model from {ckpt_dir}""" )
            state_dict = {'model': model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , planner=DefaultLoadPlanner() , )
            state_dict = state_dict['model']
            logger.info(f"""Model loaded from {ckpt_dir}""" )
        model.load_state_dict(state_dict )
def save_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , output_dir , optimizer_index=0 ):
    os.makedirs(output_dir , exist_ok=True )
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        optim_state = FSDP.optim_state_dict(model , optimizer )
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
                )
                output_optimizer_file = os.path.join(output_dir , optim_state_name )
                logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" )
                torch.save(optim_state , output_optimizer_file )
                logger.info(f"""Optimizer state saved in {output_optimizer_file}""" )
        else:
            ckpt_dir = os.path.join(output_dir , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
            os.makedirs(ckpt_dir , exist_ok=True )
            logger.info(f"""Saving Optimizer state to {ckpt_dir}""" )
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir ) , planner=DefaultSavePlanner() , )
            logger.info(f"""Optimizer state saved in {ckpt_dir}""" )
def load_fsdp_optimizer( fsdp_plugin , accelerator , optimizer , model , input_dir , optimizer_index=0 ):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
            )
            input_optimizer_file = os.path.join(input_dir , optimizer_name )
            logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" )
            optim_state = torch.load(input_optimizer_file )
            logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" )
        else:
            ckpt_dir = (
                os.path.join(input_dir , f"""{OPTIMIZER_NAME}_{optimizer_index}""" )
                if f"""{OPTIMIZER_NAME}""" not in input_dir
                else input_dir
            )
            logger.info(f"""Loading Optimizer from {ckpt_dir}""" )
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(ckpt_dir ) , )
            optim_state = optim_state['optimizer']
            logger.info(f"""Optimizer loaded from {ckpt_dir}""" )
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state , model , optimizer )
        optimizer.load_state_dict(flattened_osd )
| 327 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple('covid_data', 'cases deaths recovered')
def covid_stats( url: str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )
fmt = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
| 225 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , do_resize: bool = True , size: Dict[str, int] = None , size_divisor: int = 32 , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_normalize: bool = True , do_center_crop: bool = True , image_mean: Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std: Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , do_pad: bool = True , batch_size=7 , min_resolution=30 , max_resolution=400 , num_channels=3 , ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = parent
snake_case_ : str = do_resize
snake_case_ : str = size if size is not None else {'shortest_edge': 288}
snake_case_ : Any = size_divisor
snake_case_ : Any = do_rescale
snake_case_ : Union[str, Any] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : int = do_center_crop
snake_case_ : str = image_mean
snake_case_ : int = image_std
snake_case_ : Any = do_pad
snake_case_ : Optional[int] = batch_size
snake_case_ : List[str] = num_channels
snake_case_ : Any = min_resolution
snake_case_ : str = max_resolution
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
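

# Hedged usage sketch (added for illustration; mirrors what the tests above exercise):
# feed one random PIL image through BridgeTowerImageProcessor and print the resized,
# size_divisor-aligned output shape. Requires torch and vision extras installed.
if __name__ == "__main__":
    _processor = BridgeTowerImageProcessor(size={"shortest_edge": 288}, size_divisor=32, do_center_crop=False)
    _image = Image.fromarray(np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8))
    _pixel_values = _processor(_image, return_tensors="pt").pixel_values
    print(_pixel_values.shape)  # torch.Size([1, 3, 288, 384]); both spatial dims divisible by 32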
| 327 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read a yes/no flag from the environment, falling back to `default` when unset."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
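

# Hedged usage sketch (added; not part of the original module): how a test would
# typically consume the `offline` context manager defined above.
def _demo_offline_usage():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.request("GET", "https://huggingface.co")
        except requests.ConnectionError:
            print("offline simulation works: the request was blocked")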
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
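

# Hedged usage sketch (added; not part of the original module): running a short
# command through the async subprocess helpers above and inspecting the result.
def _demo_execute_subprocess():
    result = execute_subprocess_async([sys.executable, "-c", "print('hello from a subprocess')"])
    assert result.returncode == 0
    assert "hello from a subprocess" in "\n".join(result.stdout)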
| 266 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
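

# The `get_duration` decorator above comes from a local `utils` module that is not
# shown in this file. A minimal sketch of what such a decorator could look like
# (an assumption for illustration, not the actual implementation):
def _example_get_duration(func):
    import functools
    import timeit

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - starttime

    return wrapper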
| 327 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>",
        cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None,
        tgt_lang=None, additional_special_tokens=None, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token,
            sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token,
            mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO", **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
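

# Hedged usage sketch (added for illustration): preparing translation inputs with the
# language-code handling implemented above. Requires network access for the download.
if __name__ == "__main__":
    tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # input_ids end with [eos_token_id, en_XX language-code id] because of the
    # TemplateProcessing installed by set_src_lang_special_tokens above
    print(batch.input_ids)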
| 88 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 327 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    # refer to the full test suite in the Optimum library:
    # https://github.com/huggingface/optimum/tree/main/tests/bettertransformer
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 249 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""unc-nlp/lxmert-base-uncased""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer
    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 327 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 25 |
def is_automorphic_number(number: int) -> bool:
    """An automorphic number is one whose square ends in the number itself, e.g. 5 -> 25, 76 -> 5776."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
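

# Example (added for illustration): the automorphic numbers below 1000 are exactly
# those whose square ends in the number itself, e.g. 5 -> 25, 6 -> 36, 76 -> 5776.
if __name__ == "__main__":
    print([n for n in range(1000) if is_automorphic_number(n)])  # [0, 1, 5, 6, 25, 76, 376, 625]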
| 327 | 0 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 236 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 327 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    """Image processor following the standard resize(256) -> center-crop(224) ImageNet pipeline."""

    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None, resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None,
        do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None,
        rescale_factor: float = None, do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
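

# Hedged usage sketch (added for illustration; `DeiTImageProcessor` is the name used
# in the reconstruction above, whose defaults match the standard resize(256) ->
# center-crop(224) ImageNet pipeline):
if __name__ == "__main__":
    image = np.random.randint(0, 255, (3, 300, 400), dtype=np.uint8)
    processor = DeiTImageProcessor()
    pixel_values = processor(image, return_tensors="np").pixel_values
    print(pixel_values.shape)  # (1, 3, 224, 224)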
| 180 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
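

# Hedged usage sketch (added for illustration): this class backs the
# "feature-extraction" pipeline task, which is the usual way to invoke it.
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="hf-internal-testing/tiny-random-bert")
    features = extractor("This is a test")
    # nested list of shape (1, sequence_length, hidden_size)
    print(len(features[0]), len(features[0][0]))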
| 327 | 0 |
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Fractional knapsack: greedily take items in decreasing profit/weight order."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so index() won't pick it again
        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 === weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
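

# Worked example (added for illustration): with profits [1, 2, 3], weights [3, 4, 5]
# and max_weight 8, the greedy order by profit/weight takes item 3 whole (ratio 0.6,
# weight 5), then 3/4 of item 2 (ratio 0.5), for a gain of 3 + 0.75 * 2 = 4.5.
# With max_weight 15 everything fits and the gain is simply 1 + 2 + 3 = 6.
assert calc_profit([1, 2, 3], [3, 4, 5], 8) == 4.5
assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6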
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 84 |
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(F'''{solution() = }''')
| 327 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight'''))
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias'''))
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight'''))
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias'''))
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight'''))
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias'''))
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight'''))
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias'''))
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight'''))
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias'''))
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
])
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
])
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
])
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
])
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''transformer.blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f'''encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights into our ViLT structure.
    """
    config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 3129
        repo_id = "huggingface/label-files"
        filename = "vqa2-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config)
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: "False", 1: "True"}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config)
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config)
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config)
    else:
        raise ValueError("Unknown model type")

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
    rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config)
    if mlm_model or irtr_model:
        ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
        for k in ignore_keys:
            state_dict.pop(k, None)

    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict)

    # Define processor
    image_processor = ViltImageProcessor(size=384)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = ViltProcessor(image_processor, tokenizer)

    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids,
            pixel_values=encoding_1.pixel_values,
            pixel_values_2=encoding_2.pixel_values,
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)

    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model and processor to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
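
# Example invocation (illustrative; the script file name and output directory
# follow the usual transformers conversion-script convention and are assumptions):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm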
| 170 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Solve P = V * I for whichever of the three quantities is passed as 0."""
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError('Only one argument must be 0' )
elif power < 0:
raise ValueError(
'Power cannot be negative in any electrical/electronics system' )
elif voltage == 0:
return result('voltage' , power / current )
elif current == 0:
return result('current' , power / voltage )
elif power == 0:
return result('power' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
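
# Minimal usage sketch (illustrative): pass 0 for the unknown quantity and the
# function solves P = V * I for it.
#   >>> electric_power(voltage=0, current=2, power=4)
#   result(name='voltage', value=2.0)
#   >>> electric_power(voltage=2, current=2, power=0)
#   result(name='power', value=4.0)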
| 327 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
"""simple docstring"""
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled() -> None:
"""simple docstring"""
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('''https://huggingface.co''' )
| 14 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 327 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
a_ = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
a_ = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
a_ = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 330 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds' )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_A )
snake_case_ : Any = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Dict = [*signature.parameters.keys()]
snake_case_ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A )
snake_case_ : List[str] = model(_A , noise=_A )
snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) )
snake_case_ : str = model(**_A , noise=_A )
snake_case_ : Union[str, Any] = outputs_dict[0].numpy()
snake_case_ : Optional[Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_A : int ):
snake_case_ : Any = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_A ):
snake_case_ : str = v.numpy()
else:
snake_case_ : Optional[Any] = np.array(_A )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_A )
snake_case_ : List[Any] = self._prepare_for_class(_A , _A )
snake_case_ : Any = prepare_numpy_arrays(_A )
snake_case_ : List[Any] = model(_A , noise=_A )
snake_case_ : List[Any] = model(**_A , noise=_A )
self.assert_outputs_same(_A , _A )
def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.constant(_A )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ : Optional[Any] = tf_noise
super().check_pt_tf_models(_A , _A , _A )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_A )
if module_member_name.endswith('MainLayer' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )]
for module_member in (getattr(_A , _A ),)
if isinstance(_A , _A )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_A , '_keras_serializable' , _A )
}
snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Optional[int] = tf.convert_to_tensor(_A )
inputs_dict.update({'noise': noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ : Optional[Any] = main_layer_class(_A )
snake_case_ : List[str] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) )
snake_case_ : int = model(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' )
model.save(_A )
snake_case_ : str = tf.keras.models.load_model(
_A , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_A , tf.keras.Model )
snake_case_ : List[str] = model(_A )
self.assert_outputs_same(_A , _A )
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[Any] = model_class(_A )
snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A )
snake_case_ : int = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Any = outputs.last_hidden_state.numpy()
snake_case_ : Optional[int] = 0
else:
snake_case_ : str = outputs.logits.numpy()
snake_case_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
snake_case_ : Any = model_class.from_pretrained(_A )
snake_case_ : Any = model(_A , noise=_A )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Dict = after_outputs['last_hidden_state'].numpy()
snake_case_ : Dict = 0
else:
snake_case_ : Any = after_outputs['logits'].numpy()
snake_case_ : Optional[Any] = 0
snake_case_ : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def UpperCAmelCase_ ( self : Any ) -> str:
"""simple docstring"""
np.random.seed(2 )
snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_A )
snake_case_ : int = self._prepare_for_class(_A , _A )
snake_case_ : str = model(_A , noise=_A )
snake_case_ : Dict = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_A )
snake_case_ : Any = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ : str = model_class.from_config(model.config )
snake_case_ : Union[str, Any] = new_model(_A ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ : List[str] = new_model(_A , noise=_A )
self.assert_outputs_same(_A , _A )
@unittest.skip(
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' )
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@slow
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 327 | 0 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000) -> bool:
    """Probabilistic primality test: `prec` rounds of a Miller-Rabin-style check."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
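
# Minimal usage sketch (illustrative; the test is probabilistic but reliable at
# the default precision):
#   >>> [i for i in range(25) if is_prime_big(i)]
#   [2, 3, 5, 7, 11, 13, 17, 19, 23]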
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 225 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(nums: list[int], max_sum: int, num_index: int, path: list[int], result: list[list[int]], remaining_nums_sum: int, ) -> None:
    # Prune: the partial sum is already too large, or the remaining numbers can
    # no longer reach max_sum even if all of them are taken.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index], )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
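
# Expected output for the example above: the subsets of [3, 34, 4, 12, 5, 2]
# that sum to 9, printed in DFS order:
#   [3, 4, 2] [4, 5]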
| 327 | 0 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the alphanumeric character fraction of the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test by keyword scan and keyword counts."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}

    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped probabilistically."""
    if not check_uniques(example, uniques):
        return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 266 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: c = sqrt(K / rho)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
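
# Worked example (illustrative SI values for water at roughly 20 °C,
# K ≈ 2.15 GPa and rho ≈ 998 kg/m^3 are assumptions):
#   >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))
#   1468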
| 327 | 0 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 88 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
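
# Expected output for the call above: a 90° arc of a circle with radius 10 is
# 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.707963267948966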
| 327 | 0 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
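
# Usage sketch (the date below was checked against the Gregorian calendar):
#   >>> zeller("01-31-2010")
#   'Your date 01-31-2010, is a Sunday!'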
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
| 249 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


# NOTE: class name restored by the editor; the defaults (256 -> 224 center
# crop, bicubic resampling, ImageNet-standard stats) match the DeiT processor.
class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs )

    def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale( self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 327 | 0 |
"""simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
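
# Worked trace for the demo expression "(5 + ((4 * 2) * (2 + 3)))":
#   (4 * 2) -> 8, (2 + 3) -> 5, 8 * 5 -> 40, 5 + 40 -> 45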
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 25 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
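
# Cross-check sketch (illustrative, needs `import math`): the same maximum via a
# generator over all 13-digit windows of N.
#   >>> max(math.prod(int(c) for c in N[i : i + 13]) for i in range(len(N) - 12))
#   23514624000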
| 327 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
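
# Example invocation (illustrative paths; the script file name is an assumption):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pt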
| 236 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    freeze_encoder: bool = field(default=False , metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class SCREAMING_SNAKE_CASE_ :
__magic_name__: str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
__magic_name__: Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__magic_name__: Optional[int] = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__magic_name__: Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
__magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
__magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} )
__magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} )
__magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} )
__magic_name__: bool = field(
default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for the given split (train, val or test)."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )
    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
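# Example launch (argument values are illustrative, not prescribed by this script):
#   python finetune_trainer.py \
#       --model_name_or_path t5-small --data_dir ./wmt_en_ro --output_dir ./out \
#       --do_train --do_eval --predict_with_generate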
| 327 | 0 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update l and r to cover this new furthest-ending palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
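
# One extra sanity check beyond the doctests (my own example): the longest
# palindromic substring of "abbbaba" is "abbba".
assert palindromic_string("abbbaba") == "abbba"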
| 180 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
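# Note (illustrative, not part of the module): with the `_LazyModule` above in
# place, a line like
#   from transformers.models.poolformer import PoolFormerConfig
# only imports `configuration_poolformer` at attribute-access time, so heavy
# optional dependencies (torch, vision) are not loaded until actually needed.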
| 327 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
lowerCAmelCase_ :Dict = self.get_rust_tokenizer()
lowerCAmelCase_ :List[Any] = self.get_image_processor()
lowerCAmelCase_ :int = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
lowerCAmelCase_ :int = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Tuple = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :int = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :List[str] = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
lowerCAmelCase_ :List[str] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Optional[int] = self.get_image_processor()
lowerCAmelCase_ :List[Any] = self.get_tokenizer()
lowerCAmelCase_ :List[str] = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
lowerCAmelCase_ :str = self.prepare_image_inputs()
lowerCAmelCase_ :Any = image_processor(_A , return_tensors="""np""" )
lowerCAmelCase_ :int = processor(images=_A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Tuple = self.get_image_processor()
lowerCAmelCase_ :int = self.get_tokenizer()
lowerCAmelCase_ :Dict = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
lowerCAmelCase_ :Tuple = 'lower newer'
lowerCAmelCase_ :List[Any] = processor(text=_A )
lowerCAmelCase_ :Optional[int] = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :List[Any] = self.get_image_processor()
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :str = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
lowerCAmelCase_ :Optional[Any] = 'lower newer'
lowerCAmelCase_ :str = self.prepare_image_inputs()
lowerCAmelCase_ :Tuple = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Union[str, Any] = self.get_image_processor()
lowerCAmelCase_ :List[str] = self.get_tokenizer()
lowerCAmelCase_ :int = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
lowerCAmelCase_ :List[Any] = self.prepare_image_inputs()
lowerCAmelCase_ :List[str] = self.prepare_image_inputs()
lowerCAmelCase_ :Tuple = processor(images=_A , visual_prompt=_A )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.get_image_processor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :int = CLIPSegProcessor(tokenizer=_A , image_processor=_A )
lowerCAmelCase_ :Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Union[str, Any] = processor.batch_decode(_A )
lowerCAmelCase_ :str = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
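
# For context, the processor under test is typically used like this (sketch;
# the checkpoint name is an assumption, not taken from this test file):
#   from transformers import CLIPSegProcessor
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat"], images=image, return_tensors="pt")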
| 84 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # saving a converted model should fail until it is reversed
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
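
# Minimal standalone usage of the API exercised above (illustrative; requires
# `optimum` to be installed):
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   model = model.to_bettertransformer()        # swaps modules for fused kernels
#   model = model.reverse_bettertransformer()   # restores the original modules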
| 327 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 170 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
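
# Minimal usage sketch (assumes a `params` namespace with the attributes used
# above and `data` as a list of int arrays; not part of the original module):
#   from torch.utils.data import DataLoader
#   dataset = LmSeqsDataset(params=params, data=data)
#   loader = DataLoader(dataset, batch_size=32, collate_fn=dataset.batch_sequences)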
| 327 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
'''simple docstring'''
def __init__( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]=13 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Any=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Optional[int]=99 , UpperCAmelCase__ : int=32 , UpperCAmelCase__ : List[Any]=5 , UpperCAmelCase__ : Any=4 , UpperCAmelCase__ : Optional[Any]=4 , UpperCAmelCase__ : Dict="gelu" , UpperCAmelCase__ : str=0.0 , UpperCAmelCase__ : Union[str, Any]=0.1 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[Any]=512 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Union[str, Any]=0.02 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : Tuple=None , ) ->Optional[int]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_multiple_size
A__ = hidden_act
A__ = hidden_dropout
A__ = attention_dropout
A__ = weight_tying
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
A__ = None
if self.use_input_mask:
A__ = random_attention_mask([self.batch_size, self.seq_length])
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
A__ = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]:
'''simple docstring'''
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ = True
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
A__ = GPTNeoXJapaneseModel(config=_A)
model.to(_A)
model.eval()
A__ = model(_A , attention_mask=_A)
A__ = model(_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[Any]) ->List[str]:
'''simple docstring'''
A__ = True
A__ = GPTNeoXJapaneseModel(_A)
model.to(_A)
model.eval()
A__ = model(_A , attention_mask=_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any]) ->int:
'''simple docstring'''
A__ = GPTNeoXJapaneseForCausalLM(config=_A)
model.to(_A)
model.eval()
A__ = model(_A , attention_mask=_A , labels=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any]) ->Dict:
'''simple docstring'''
A__ = True
A__ = GPTNeoXJapaneseForCausalLM(config=_A)
model.to(_A)
model.eval()
# first forward pass
A__ = model(_A , attention_mask=_A , use_cache=_A)
A__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 3) , config.vocab_size)
A__ = ids_tensor((self.batch_size, 3) , vocab_size=2)
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1)
A__ = torch.cat([input_mask, next_mask] , dim=-1)
A__ = model(_A , attention_mask=_A , output_hidden_states=_A)
A__ = output_from_no_past['hidden_states'][0]
A__ = model(
_A , attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1]).item()
A__ = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3))
def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[int]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
A__ = config_and_inputs
A__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXJapaneseModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs_for_decoder()
A__ = None
self.model_tester.create_and_check_model_as_decoder(_A , _A , _A)
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->str:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_A , _A , _A)
def SCREAMING_SNAKE_CASE ( self : Any) ->Dict:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_A)
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
'''simple docstring'''
A__ = 'abeja/gpt-neox-japanese-2.7b'
A__ = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
A__ = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
A__ = GPTNeoXJapaneseTokenizer.from_pretrained(_A)
A__ = GPTNeoXJapaneseForCausalLM.from_pretrained(_A)
A__ = []
for prompt in prompts:
A__ = tokenizer(_A , return_tensors='''pt''').input_ids
A__ = model.generate(_A , max_length=50)
A__ = tokenizer.batch_decode(_A , skip_special_tokens=_A)
predicted_outputs += generated_string
self.assertListEqual(_A , _A)
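
# The slow integration test above mirrors typical usage (illustrative sketch;
# downloads a large checkpoint, so it is left commented out):
#   tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#   ids = tokenizer("データサイエンティストとは、", return_tensors="pt").input_ids
#   text = tokenizer.batch_decode(model.generate(ids, max_length=50), skip_special_tokens=True)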
| 14 |
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
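
# Cross-check against the standard library (my addition, not in the original):
import math

assert euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == math.gcd(48, 18) == 6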
| 327 | 0 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return the shortest distances from `src` to every vertex, or raise on a negative cycle."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ("src", "dst", "weight"))
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
V = int(input("Enter number of vertices: ").strip())
E = int(input("Enter number of edges: ").strip())

graph: list[dict[str, int]] = [{} for _ in range(E)]

for i in range(E):
    print("Edge ", i + 1)
    src, dest, weight = (
        int(x)
        for x in input("Enter source, destination, weight: ").strip().split(" ")
    )
    graph[i] = {"src": src, "dst": dest, "weight": weight}

source = int(input("\nEnter shortest path source:").strip())
shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
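
# Worked example (my own illustration, separate from the interactive demo):
# for edges 0->1 (w=2) and 1->2 (w=3) with source 0, bellman_ford returns
# [0.0, 2.0, 5.0].
#   example_graph = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": 3}]
#   bellman_ford(example_graph, vertex_count=3, edge_count=2, src=0)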
| 330 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object")
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
    model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state}, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key="optimizer", storage_reader=dist_cp.FileSystemReader(ckpt_dir), )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
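
# Usage sketch (illustrative, assumes an `Accelerator` configured with a
# `FullyShardedDataParallelPlugin`; not part of the original module):
#   save_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, output_dir)
#   save_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, output_dir)
#   load_fsdp_model(accelerator.state.fsdp_plugin, accelerator, model, output_dir)
#   load_fsdp_optimizer(accelerator.state.fsdp_plugin, accelerator, optimizer, model, output_dir)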
| 327 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """
    Stochastic sampling from Karras et al. [1] tailored to Variance-Expanding (VE) models [2].

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    [2] Song, Yang, et al. "Score-based generative modeling through stochastic differential equations."
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 50, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output, sigma_hat, sigma_prev, sample_hat, step_output.prev_sample, step_output["derivative"], )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image) | 225 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, do_center_crop: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], do_pad: bool = True, batch_size=7, min_resolution=30, max_resolution=400, num_channels=3, ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
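
# Worked example of `get_expected_values` (my own numbers): for a 480x640 image
# with shortest_edge=288 and size_divisor=32, scale = 288/480 = 0.6, giving a
# 288x384 resize; both sides are already multiples of 32, so the expected
# output size is (288, 384).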
| 327 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
if "model" in orig_key:
__A = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
__A = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
__A = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
__A = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
__A = orig_key.split('''.''' )[0].split('''_''' )[-1]
__A = orig_key.replace(f'transformer_{layer_num}' , f'encoder.layer.{layer_num}' )
if "mha.attn" in orig_key:
__A = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
__A = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
__A = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
__A = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
__A = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
__A = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
__A = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
__A = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
__A = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
__A = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
__A = 'yoso.' + orig_key
return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
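# Sanity check of the renaming scheme above, traced by hand on a hypothetical fairseq-style key:
# rename_key("model.transformer_0.mha.W_q.weight")
# -> "yoso.encoder.layer.0.attention.self.query.weight"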
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 266 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
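# The JSON written above maps each benchmark label to a duration; a run produces a file shaped
# roughly like this (timings purely illustrative):
# {"num examples": 500000, "map identity": 38.1, "map identity batched": 4.2, ..., "filter": 9.7}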
| 327 | 0 |
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 88 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url="https://www.worldometers.info/coronavirus/"):
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
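# The xpath grabs the three "maincounter-number" spans from the page header, so covid_stats()
# returns something like covid_data(cases='704,753,890', deaths='7,010,681', recovered='675,619,811')
# (figures here are purely illustrative).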
| 327 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
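# What the lazy structure above buys (sketch; the exact package path depends on where this
# __init__.py lives): importing the package is cheap, and the torch-backed modeling file is
# only imported when an attribute is first resolved.
# from transformers.models import autoformer  # does not pull in the modeling code yet
# cls = autoformer.AutoformerModel             # modeling_autoformer is imported here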
| 249 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
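# Hedged usage sketch (checkpoint name taken from the pretrained map above):
# tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
# ids = tok("a photo of a cat")["input_ids"]  # [CLS] ... [SEP], per build_inputs_with_special_tokens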
| 327 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
PRETRAINED_INIT_CONFIGURATION = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
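# A quick trace of the id offset handled above (ids come from the alignment table in __init__):
# the spm piece "," has spm id 3, so it is exposed as model id 3 + fairseq_offset = 15, while
# ids 0-14 are taken by [PAD]/[CLS]/[SEP]/[UNK]/[MASK] and the ten [unused*] placeholders.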
| 25 |
def is_automorphic_number(number):
    """Return True if the square of `number` ends in `number` itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
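# Worked examples of the digit-by-digit check above: 76 is automorphic since 76**2 = 5776 ends
# in 76, while 7 is not, since 7**2 = 49.
# is_automorphic_number(76)  # True
# is_automorphic_number(7)   # False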
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 | 0 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
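# Hypothetical programmatic call (the repo id and file name are placeholders, not real artifacts):
# convert_rmkv_checkpoint_to_hf_format(
#     "some-user/rwkv-checkpoints", "rwkv-169M.pth", "./rwkv-169m-hf", size="169M"
# )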
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 236 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 327 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Kwargs are passed through to calculate_rouge."""
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
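# e.g. (file names are placeholders):
# calculate_rouge_path("test_generations.txt", "test_targets.txt", save_path="rouge_scores.json")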
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 180 |
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor (logits or last_hidden_state)
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
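# Hedged usage sketch (the model name is only an example):
# from transformers import pipeline
# extractor = pipeline("feature-extraction", model="bert-base-cased")
# features = extractor("This is a test")  # nested list: [batch, sequence, hidden_size]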
| 327 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
    from nltk import word_tokenize


_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 84 |
from itertools import permutations
def is_substring_divisible(num):
    """Check the substring-divisibility property of the digit tuple `num` (Project Euler 43)."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n=10):
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
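# Project Euler 43's worked example: 1406357289 is 0-9 pandigital and its 3-digit substrings
# 406, 063, 635, 357, 572, 728, 289 are divisible by 2, 3, 5, 7, 11, 13, 17 respectively,
# so solution() counts it in the sum.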
if __name__ == "__main__":
print(F'''{solution() = }''')
| 327 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """
    Copy/paste/tweak the model's weights to the transformers design.
    """
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
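# Example invocation (the script file name and all paths are placeholders):
# python convert_sew_checkpoint.py --checkpoint_path ./sew.pt \
#     --pytorch_dump_folder_path ./sew-hf --dict_path ./dict.ltr.txt --is_finetuned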
| 170 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage, current, power):
    """Given any two of voltage, current and power (the third passed as 0), compute the missing one."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
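# Worked examples of the P = V * I relations implemented above:
# electric_power(voltage=0, current=2, power=5)   # result(name='voltage', value=2.5)
# electric_power(voltage=2, current=4, power=0)   # result(name='power', value=8.0)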
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 | 0 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 14 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_CITATION = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 327 | 0 |